2 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
3 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
5 * Right now, I am very wasteful with the buffers. I allocate memory
6 * pages and then divide them into 2K frame buffers. This way I know I
7 * have buffers large enough to hold one frame within one buffer descriptor.
8 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
9 * will be much more memory efficient and will easily handle lots of small packets.
12 * Much better multiple PHY support by Magnus Damm.
13 * Copyright (c) 2000 Ericsson Radio Systems AB.
15 * Support for FEC controller of ColdFire processors.
16 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
18 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
19 * Copyright (c) 2004-2006 Macq Electronique SA.
22 #include <linux/module.h>
23 #include <linux/kernel.h>
24 #include <linux/string.h>
25 #include <linux/ptrace.h>
26 #include <linux/errno.h>
27 #include <linux/ioport.h>
28 #include <linux/slab.h>
29 #include <linux/interrupt.h>
30 #include <linux/pci.h>
31 #include <linux/init.h>
32 #include <linux/delay.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/skbuff.h>
36 #include <linux/spinlock.h>
37 #include <linux/workqueue.h>
38 #include <linux/bitops.h>
40 #include <linux/irq.h>
41 #include <linux/clk.h>
42 #include <linux/platform_device.h>
43 #include <linux/phy.h>
44 #include <linux/fec.h>
46 #include <asm/cacheflush.h>
48 #ifndef CONFIG_ARCH_MXC
49 #include <asm/coldfire.h>
50 #include <asm/mcfsim.h>
55 #ifdef CONFIG_ARCH_MXC
56 #include <mach/hardware.h>
57 #define FEC_ALIGNMENT 0xf
59 #define FEC_ALIGNMENT 0x3
62 static unsigned char macaddr[ETH_ALEN];
63 module_param_array(macaddr, byte, NULL, 0);
64 MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
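/* Usage example (illustrative address): pass
 *   fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
 * on the kernel command line, or macaddr=... to modprobe when the driver
 * is built as a module.
 */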
66 #if defined(CONFIG_M5272)
68 * Some hardware gets its MAC address out of local flash memory.
69 * If this is non-zero then assume it is the address to get the MAC address from.
71 #if defined(CONFIG_NETtel)
72 #define FEC_FLASHMAC 0xf0006006
73 #elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
74 #define FEC_FLASHMAC 0xf0006000
75 #elif defined(CONFIG_CANCam)
76 #define FEC_FLASHMAC 0xf0020000
77 #elif defined (CONFIG_M5272C3)
78 #define FEC_FLASHMAC (0xffe04000 + 4)
79 #elif defined(CONFIG_MOD5272)
80 #define FEC_FLASHMAC 0xffc0406b
82 #define FEC_FLASHMAC 0
84 #endif /* CONFIG_M5272 */
86 /* The number of Tx and Rx buffers. These are allocated from the page
87 * pool. The code may assume these are powers of two, so it is best
88 * to keep them that size.
89 * We don't need to allocate pages for the transmitter. We just use
90 * the skbuffer directly.
92 #define FEC_ENET_RX_PAGES 8
93 #define FEC_ENET_RX_FRSIZE 2048
94 #define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
95 #define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
96 #define FEC_ENET_TX_FRSIZE 2048
97 #define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
98 #define TX_RING_SIZE 16 /* Must be power of two */
99 #define TX_RING_MOD_MASK 15 /* for this to work */
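/* Because the ring sizes are powers of two, ring indices can wrap with a
 * simple mask instead of a modulo; e.g. with TX_RING_SIZE == 16,
 * next = (cur + 1) & TX_RING_MOD_MASK turns 15 back into 0.
 */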
101 #if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
102 #error "FEC: descriptor ring size constants too large"
105 /* Interrupt events/masks. */
106 #define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
107 #define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
108 #define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */
109 #define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */
110 #define FEC_ENET_TXF ((uint)0x08000000) /* Full frame transmitted */
111 #define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */
112 #define FEC_ENET_RXF ((uint)0x02000000) /* Full frame received */
113 #define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */
114 #define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */
115 #define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
117 #define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
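/* Only the full-frame transmit/receive completions and the MII transfer-done
 * event are unmasked by default; the per-buffer (TXB/RXB) and error
 * interrupts above stay masked.
 */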
119 /* The FEC stores dest/src/type, data, and checksum for receive packets.
121 #define PKT_MAXBUF_SIZE 1518
122 #define PKT_MINBUF_SIZE 64
123 #define PKT_MAXBLR_SIZE 1520
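/* 1518 is the maximum untagged Ethernet frame: 6 (dst) + 6 (src) +
 * 2 (type/len) + 1500 (payload) + 4 (FCS). 1520 is that rounded up to the
 * next multiple of 16, which is what FEC_R_BUFF_SIZE is programmed with
 * in fec_restart().
 */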
127 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
128 * size bits. Other FEC hardware does not, so we need to take that into
129 * account when setting it.
131 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
132 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
133 #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
135 #define OPT_FRAME_SIZE 0
138 /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
139 * tx_bd_base always point to the base of the buffer descriptors. The
140 * cur_rx and cur_tx point to the currently available buffer.
141 * The dirty_tx tracks the current buffer that is being sent by the
142 * controller. The cur_tx and dirty_tx are equal under both completely
143 * empty and completely full conditions. The empty/ready indicator in
144 * the buffer descriptor determines the actual condition.
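/* For example: queueing one frame advances cur_tx past the descriptor while
 * dirty_tx still points at it; only when the FEC clears that descriptor's
 * READY bit does fec_enet_tx() advance dirty_tx again, so the READY/EMPTY
 * bits, not the pointers, tell full from empty.
 */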
146 struct fec_enet_private {
147 /* Hardware registers of the FEC device */
150 struct net_device *netdev;
154 /* The saved address of a sent-in-place packet/buffer, for kfree_skb(). */
155 unsigned char *tx_bounce[TX_RING_SIZE];
156 struct sk_buff* tx_skbuff[TX_RING_SIZE];
157 struct sk_buff* rx_skbuff[RX_RING_SIZE];
161 /* CPM dual port RAM relative addresses */
163 /* Address of Rx and Tx buffers */
164 struct bufdesc *rx_bd_base;
165 struct bufdesc *tx_bd_base;
166 /* The next free ring entry */
167 struct bufdesc *cur_rx, *cur_tx;
168 /* The ring entries to be free()ed */
169 struct bufdesc *dirty_tx;
172 /* Held while accessing the HW (e.g. the Tx/Rx buffer descriptor rings), but not for MAC register access */
175 struct platform_device *pdev;
179 /* Phylib and MDIO interface */
180 struct mii_bus *mii_bus;
181 struct phy_device *phy_dev;
184 phy_interface_t phy_interface;
187 struct completion mdio_done;
190 static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
191 static void fec_enet_tx(struct net_device *dev);
192 static void fec_enet_rx(struct net_device *dev);
193 static int fec_enet_close(struct net_device *dev);
194 static void fec_restart(struct net_device *dev, int duplex);
195 static void fec_stop(struct net_device *dev);
197 /* FEC MII MMFR bits definition */
198 #define FEC_MMFR_ST (1 << 30)
199 #define FEC_MMFR_OP_READ (2 << 28)
200 #define FEC_MMFR_OP_WRITE (1 << 28)
201 #define FEC_MMFR_PA(v) (((v) & 0x1f) << 23)
202 #define FEC_MMFR_RA(v) (((v) & 0x1f) << 18)
203 #define FEC_MMFR_TA (2 << 16)
204 #define FEC_MMFR_DATA(v) ((v) & 0xffff)
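/* These fields form an IEEE 802.3 clause 22 management frame: ST (01) start,
 * OP (10 read / 01 write), PA = PHY address, RA = register address,
 * TA (10) turnaround, and the 16-bit data word; see fec_enet_mdio_read()
 * and fec_enet_mdio_write() below.
 */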
206 #define FEC_MII_TIMEOUT 1000 /* us */
208 /* Transmitter timeout */
209 #define TX_TIMEOUT (2 * HZ)
212 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
214 struct fec_enet_private *fep = netdev_priv(dev);
217 unsigned short status;
221 /* Link is down or autonegotiation is in progress. */
222 return NETDEV_TX_BUSY;
225 spin_lock_irqsave(&fep->hw_lock, flags);
226 /* Fill in a Tx ring entry */
229 status = bdp->cbd_sc;
231 if (status & BD_ENET_TX_READY) {
232 /* Oops. All transmit buffers are full. Bail out.
233 * This should not happen, since the transmit queue should be stopped. */
235 printk("%s: tx queue full!\n", dev->name);
236 spin_unlock_irqrestore(&fep->hw_lock, flags);
237 return NETDEV_TX_BUSY;
240 /* Clear all of the status flags */
241 status &= ~BD_ENET_TX_STATS;
243 /* Set buffer length and buffer pointer */
245 bdp->cbd_datlen = skb->len;
248 * On some FEC implementations data must be aligned on
249 * 4-byte boundaries. Use bounce buffers to copy data
250 * and get it aligned. Ugh.
252 if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
254 index = bdp - fep->tx_bd_base;
255 memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len);
256 bufaddr = fep->tx_bounce[index];
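/* FEC_ALIGNMENT is 0xf (16-byte DMA alignment) on i.MX and 0x3 (4-byte) on
 * ColdFire per the defines above, so the bounce copy is only taken when
 * skb->data is not already suitably aligned.
 */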
259 /* Save skb pointer */
260 fep->tx_skbuff[fep->skb_cur] = skb;
262 dev->stats.tx_bytes += skb->len;
263 fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
265 /* Push the data cache so the CPM does not get stale memory data. */
268 bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr,
269 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
271 /* Send it on its way. Tell FEC it's ready, interrupt when done,
272 * it's the last BD of the frame, and to put the CRC on the end.
274 status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
275 | BD_ENET_TX_LAST | BD_ENET_TX_TC);
276 bdp->cbd_sc = status;
278 /* Trigger transmission start */
279 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
281 /* If this was the last BD in the ring, start at the beginning again. */
282 if (status & BD_ENET_TX_WRAP)
283 bdp = fep->tx_bd_base;
287 if (bdp == fep->dirty_tx) {
289 netif_stop_queue(dev);
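/* cur_tx has caught up with dirty_tx: every descriptor is now in use,
 * so stop the queue until fec_enet_tx() reclaims some entries.
 */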
294 spin_unlock_irqrestore(&fep->hw_lock, flags);
300 fec_timeout(struct net_device *dev)
302 struct fec_enet_private *fep = netdev_priv(dev);
304 dev->stats.tx_errors++;
306 fec_restart(dev, fep->full_duplex);
307 netif_wake_queue(dev);
311 fec_enet_interrupt(int irq, void * dev_id)
313 struct net_device *dev = dev_id;
314 struct fec_enet_private *fep = netdev_priv(dev);
316 irqreturn_t ret = IRQ_NONE;
319 int_events = readl(fep->hwp + FEC_IEVENT);
320 writel(int_events, fep->hwp + FEC_IEVENT);
322 if (int_events & FEC_ENET_RXF) {
327 /* Transmit OK, or non-fatal error. Update the buffer
328 * descriptors. The FEC handles all errors; we just discover
329 * them as part of the transmit process.
331 if (int_events & FEC_ENET_TXF) {
336 if (int_events & FEC_ENET_MII) {
338 complete(&fep->mdio_done);
340 } while (int_events);
347 fec_enet_tx(struct net_device *dev)
349 struct fec_enet_private *fep;
351 unsigned short status;
354 fep = netdev_priv(dev);
355 spin_lock(&fep->hw_lock);
358 while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
359 if (bdp == fep->cur_tx && fep->tx_full == 0)
362 dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
363 bdp->cbd_bufaddr = 0;
365 skb = fep->tx_skbuff[fep->skb_dirty];
366 /* Check for errors. */
367 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
368 BD_ENET_TX_RL | BD_ENET_TX_UN |
370 dev->stats.tx_errors++;
371 if (status & BD_ENET_TX_HB) /* No heartbeat */
372 dev->stats.tx_heartbeat_errors++;
373 if (status & BD_ENET_TX_LC) /* Late collision */
374 dev->stats.tx_window_errors++;
375 if (status & BD_ENET_TX_RL) /* Retrans limit */
376 dev->stats.tx_aborted_errors++;
377 if (status & BD_ENET_TX_UN) /* Underrun */
378 dev->stats.tx_fifo_errors++;
379 if (status & BD_ENET_TX_CSL) /* Carrier lost */
380 dev->stats.tx_carrier_errors++;
382 dev->stats.tx_packets++;
385 if (status & BD_ENET_TX_READY)
386 printk("HEY! Enet xmit interrupt and TX_READY.\n");
388 /* Deferred means some collisions occurred during transmit,
389 * but we eventually sent the packet OK.
391 if (status & BD_ENET_TX_DEF)
392 dev->stats.collisions++;
394 /* Free the sk buffer associated with this last transmit */
395 dev_kfree_skb_any(skb);
396 fep->tx_skbuff[fep->skb_dirty] = NULL;
397 fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;
399 /* Update pointer to next buffer descriptor to be transmitted */
400 if (status & BD_ENET_TX_WRAP)
401 bdp = fep->tx_bd_base;
405 /* Since we have freed up a buffer, the ring is no longer full
409 if (netif_queue_stopped(dev))
410 netif_wake_queue(dev);
414 spin_unlock(&fep->hw_lock);
418 /* During a receive, the cur_rx points to the current incoming buffer.
419 * When we update through the ring, if the next incoming buffer has
420 * not been given to the system, we just set the empty indicator,
421 * effectively tossing the packet.
424 fec_enet_rx(struct net_device *dev)
426 struct fec_enet_private *fep = netdev_priv(dev);
428 unsigned short status;
437 spin_lock(&fep->hw_lock);
439 /* First, grab all of the stats for the incoming packet.
440 * These get messed up if we get called due to a busy condition.
444 while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
446 /* Since we have allocated space to hold a complete frame,
447 * the last indicator should be set.
449 if ((status & BD_ENET_RX_LAST) == 0)
450 printk("FEC ENET: rcv is not +last\n");
453 goto rx_processing_done;
455 /* Check for errors. */
456 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
457 BD_ENET_RX_CR | BD_ENET_RX_OV)) {
458 dev->stats.rx_errors++;
459 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
460 /* Frame too long or too short. */
461 dev->stats.rx_length_errors++;
463 if (status & BD_ENET_RX_NO) /* Frame alignment */
464 dev->stats.rx_frame_errors++;
465 if (status & BD_ENET_RX_CR) /* CRC Error */
466 dev->stats.rx_crc_errors++;
467 if (status & BD_ENET_RX_OV) /* FIFO overrun */
468 dev->stats.rx_fifo_errors++;
471 /* Report late collisions as a frame error.
472 * On this error, the BD is closed, but we don't know what we
473 * have in the buffer. So, just drop this frame on the floor.
475 if (status & BD_ENET_RX_CL) {
476 dev->stats.rx_errors++;
477 dev->stats.rx_frame_errors++;
478 goto rx_processing_done;
481 /* Process the incoming frame. */
482 dev->stats.rx_packets++;
483 pkt_len = bdp->cbd_datlen;
484 dev->stats.rx_bytes += pkt_len;
485 data = (__u8*)__va(bdp->cbd_bufaddr);
487 dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
490 /* This does 16 byte alignment, exactly what we need.
491 * The packet length includes FCS, but we don't want to
492 * include that when passing upstream as it messes up
493 * bridging applications.
495 skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);
497 if (unlikely(!skb)) {
498 printk("%s: Memory squeeze, dropping packet.\n",
500 dev->stats.rx_dropped++;
502 skb_reserve(skb, NET_IP_ALIGN);
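/* NET_IP_ALIGN (typically 2) offsets the 14-byte Ethernet header so that
 * the IP header ends up 4-byte aligned in the new skb.
 */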
503 skb_put(skb, pkt_len - 4); /* Make room */
504 skb_copy_to_linear_data(skb, data, pkt_len - 4);
505 skb->protocol = eth_type_trans(skb, dev);
509 bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen,
512 /* Clear the status flags for this buffer */
513 status &= ~BD_ENET_RX_STATS;
515 /* Mark the buffer empty */
516 status |= BD_ENET_RX_EMPTY;
517 bdp->cbd_sc = status;
519 /* Update BD pointer to next entry */
520 if (status & BD_ENET_RX_WRAP)
521 bdp = fep->rx_bd_base;
524 /* Doing this here will keep the FEC running while we process
525 * incoming frames. On a heavily loaded network, we should be
526 * able to keep up at the expense of system resources.
528 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
532 spin_unlock(&fep->hw_lock);
535 /* ------------------------------------------------------------------------- */
536 static inline void fec_get_mac(struct net_device *dev)
538 struct fec_enet_private *fep = netdev_priv(dev);
539 struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
540 unsigned char *iap, tmpaddr[ETH_ALEN];
543 * Try to get the MAC address in the following order:
545 * 1) module parameter via kernel command line in form
546 * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
551 * 2) from flash or fuse (via platform data)
553 if (!is_valid_ether_addr(iap)) {
556 iap = (unsigned char *)FEC_FLASHMAC;
559 memcpy(iap, pdata->mac, ETH_ALEN);
564 * 3) FEC MAC registers set by the bootloader
566 if (!is_valid_ether_addr(iap)) {
567 *((unsigned long *) &tmpaddr[0]) =
568 be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW));
569 *((unsigned short *) &tmpaddr[4]) =
570 be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
574 memcpy(dev->dev_addr, iap, ETH_ALEN);
576 /* Adjust MAC if using macaddr */
578 dev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
581 /* ------------------------------------------------------------------------- */
586 static void fec_enet_adjust_link(struct net_device *dev)
588 struct fec_enet_private *fep = netdev_priv(dev);
589 struct phy_device *phy_dev = fep->phy_dev;
592 int status_change = 0;
594 spin_lock_irqsave(&fep->hw_lock, flags);
596 /* Prevent the PHY state machine from staying halted after an MII error */
597 if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
598 phy_dev->state = PHY_RESUMING;
602 /* Duplex link change */
604 if (fep->full_duplex != phy_dev->duplex) {
605 fec_restart(dev, phy_dev->duplex);
610 /* Link on or off change */
611 if (phy_dev->link != fep->link) {
612 fep->link = phy_dev->link;
614 fec_restart(dev, phy_dev->duplex);
621 spin_unlock_irqrestore(&fep->hw_lock, flags);
624 phy_print_status(phy_dev);
627 static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
629 struct fec_enet_private *fep = bus->priv;
630 unsigned long time_left;
632 fep->mii_timeout = 0;
633 init_completion(&fep->mdio_done);
635 /* start a read op */
636 writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
637 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
638 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
640 /* wait for end of transfer */
641 time_left = wait_for_completion_timeout(&fep->mdio_done,
642 usecs_to_jiffies(FEC_MII_TIMEOUT));
643 if (time_left == 0) {
644 fep->mii_timeout = 1;
645 printk(KERN_ERR "FEC: MDIO read timeout\n");
650 return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
653 static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
656 struct fec_enet_private *fep = bus->priv;
657 unsigned long time_left;
659 fep->mii_timeout = 0;
660 init_completion(&fep->mdio_done);
662 /* start a write op */
663 writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
664 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
665 FEC_MMFR_TA | FEC_MMFR_DATA(value),
666 fep->hwp + FEC_MII_DATA);
668 /* wait for end of transfer */
669 time_left = wait_for_completion_timeout(&fep->mdio_done,
670 usecs_to_jiffies(FEC_MII_TIMEOUT));
671 if (time_left == 0) {
672 fep->mii_timeout = 1;
673 printk(KERN_ERR "FEC: MDIO write timeout\n");
680 static int fec_enet_mdio_reset(struct mii_bus *bus)
685 static int fec_enet_mii_probe(struct net_device *dev)
687 struct fec_enet_private *fep = netdev_priv(dev);
688 struct phy_device *phy_dev = NULL;
689 char mdio_bus_id[MII_BUS_ID_SIZE];
690 char phy_name[MII_BUS_ID_SIZE + 3];
695 /* check for attached phy */
696 for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
697 if ((fep->mii_bus->phy_mask & (1 << phy_id)))
699 if (fep->mii_bus->phy_map[phy_id] == NULL)
701 if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
703 strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
707 if (phy_id >= PHY_MAX_ADDR) {
708 printk(KERN_INFO "%s: no PHY, assuming direct connection "
709 "to switch\n", dev->name);
710 strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
714 snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT, mdio_bus_id, phy_id);
715 phy_dev = phy_connect(dev, phy_name, &fec_enet_adjust_link, 0,
716 PHY_INTERFACE_MODE_MII);
717 if (IS_ERR(phy_dev)) {
718 printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
719 return PTR_ERR(phy_dev);
722 /* mask with MAC supported features */
723 phy_dev->supported &= PHY_BASIC_FEATURES;
724 phy_dev->advertising = phy_dev->supported;
726 fep->phy_dev = phy_dev;
728 fep->full_duplex = 0;
730 printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
731 "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
732 fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
738 static int fec_enet_mii_init(struct platform_device *pdev)
740 struct net_device *dev = platform_get_drvdata(pdev);
741 struct fec_enet_private *fep = netdev_priv(dev);
744 fep->mii_timeout = 0;
747 * Set MII speed (MDC) to at most 2.5 MHz: MDC = clk_get_rate() / phy_speed
749 fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000) << 1;
750 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
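/* Worked example with an assumed 50 MHz fec_clk: DIV_ROUND_UP(50000000,
 * 5000000) = 10, shifted left once gives 20; the MII speed register's
 * MII_SPEED field (bits 6:1) is therefore 10, and MDC = 50 MHz / (2 * 10)
 * = 2.5 MHz.
 */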
752 fep->mii_bus = mdiobus_alloc();
753 if (fep->mii_bus == NULL) {
758 fep->mii_bus->name = "fec_enet_mii_bus";
759 fep->mii_bus->read = fec_enet_mdio_read;
760 fep->mii_bus->write = fec_enet_mdio_write;
761 fep->mii_bus->reset = fec_enet_mdio_reset;
762 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1);
763 fep->mii_bus->priv = fep;
764 fep->mii_bus->parent = &pdev->dev;
766 fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
767 if (!fep->mii_bus->irq) {
769 goto err_out_free_mdiobus;
772 for (i = 0; i < PHY_MAX_ADDR; i++)
773 fep->mii_bus->irq[i] = PHY_POLL;
775 platform_set_drvdata(dev, fep->mii_bus);
777 if (mdiobus_register(fep->mii_bus))
778 goto err_out_free_mdio_irq;
782 err_out_free_mdio_irq:
783 kfree(fep->mii_bus->irq);
784 err_out_free_mdiobus:
785 mdiobus_free(fep->mii_bus);
790 static void fec_enet_mii_remove(struct fec_enet_private *fep)
793 phy_disconnect(fep->phy_dev);
794 mdiobus_unregister(fep->mii_bus);
795 kfree(fep->mii_bus->irq);
796 mdiobus_free(fep->mii_bus);
799 static int fec_enet_get_settings(struct net_device *dev,
800 struct ethtool_cmd *cmd)
802 struct fec_enet_private *fep = netdev_priv(dev);
803 struct phy_device *phydev = fep->phy_dev;
808 return phy_ethtool_gset(phydev, cmd);
811 static int fec_enet_set_settings(struct net_device *dev,
812 struct ethtool_cmd *cmd)
814 struct fec_enet_private *fep = netdev_priv(dev);
815 struct phy_device *phydev = fep->phy_dev;
820 return phy_ethtool_sset(phydev, cmd);
823 static void fec_enet_get_drvinfo(struct net_device *dev,
824 struct ethtool_drvinfo *info)
826 struct fec_enet_private *fep = netdev_priv(dev);
828 strcpy(info->driver, fep->pdev->dev.driver->name);
829 strcpy(info->version, "Revision: 1.0");
830 strcpy(info->bus_info, dev_name(&dev->dev));
833 static struct ethtool_ops fec_enet_ethtool_ops = {
834 .get_settings = fec_enet_get_settings,
835 .set_settings = fec_enet_set_settings,
836 .get_drvinfo = fec_enet_get_drvinfo,
837 .get_link = ethtool_op_get_link,
840 static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
842 struct fec_enet_private *fep = netdev_priv(dev);
843 struct phy_device *phydev = fep->phy_dev;
845 if (!netif_running(dev))
851 return phy_mii_ioctl(phydev, rq, cmd);
854 static void fec_enet_free_buffers(struct net_device *dev)
856 struct fec_enet_private *fep = netdev_priv(dev);
861 bdp = fep->rx_bd_base;
862 for (i = 0; i < RX_RING_SIZE; i++) {
863 skb = fep->rx_skbuff[i];
865 if (bdp->cbd_bufaddr)
866 dma_unmap_single(&dev->dev, bdp->cbd_bufaddr,
867 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
873 bdp = fep->tx_bd_base;
874 for (i = 0; i < TX_RING_SIZE; i++)
875 kfree(fep->tx_bounce[i]);
878 static int fec_enet_alloc_buffers(struct net_device *dev)
880 struct fec_enet_private *fep = netdev_priv(dev);
885 bdp = fep->rx_bd_base;
886 for (i = 0; i < RX_RING_SIZE; i++) {
887 skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
889 fec_enet_free_buffers(dev);
892 fep->rx_skbuff[i] = skb;
894 bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
895 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
896 bdp->cbd_sc = BD_ENET_RX_EMPTY;
900 /* Set the last buffer to wrap. */
902 bdp->cbd_sc |= BD_SC_WRAP;
904 bdp = fep->tx_bd_base;
905 for (i = 0; i < TX_RING_SIZE; i++) {
906 fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
909 bdp->cbd_bufaddr = 0;
913 /* Set the last buffer to wrap. */
915 bdp->cbd_sc |= BD_SC_WRAP;
921 fec_enet_open(struct net_device *dev)
923 struct fec_enet_private *fep = netdev_priv(dev);
926 /* I should reset the ring buffers here, but I don't yet know
927 * a simple way to do that.
930 ret = fec_enet_alloc_buffers(dev);
934 /* Probe and connect to PHY when open the interface */
935 ret = fec_enet_mii_probe(dev);
937 fec_enet_free_buffers(dev);
940 phy_start(fep->phy_dev);
941 netif_start_queue(dev);
947 fec_enet_close(struct net_device *dev)
949 struct fec_enet_private *fep = netdev_priv(dev);
951 /* Don't know what to do yet. */
953 netif_stop_queue(dev);
957 phy_disconnect(fep->phy_dev);
959 fec_enet_free_buffers(dev);
964 /* Set or clear the multicast filter for this adaptor.
965 * Skeleton taken from sunlance driver.
966 * The CPM Ethernet implementation allows Multicast as well as individual
967 * MAC address filtering. Some of the drivers check to make sure it is
968 * a group multicast address, and discard those that are not. I guess I
969 * will do the same for now, but just remove the test if you want
970 * individual filtering as well (do the upper net layers want or support
971 * this kind of feature?).
974 #define HASH_BITS 6 /* #bits in hash */
975 #define CRC32_POLY 0xEDB88320
977 static void set_multicast_list(struct net_device *dev)
979 struct fec_enet_private *fep = netdev_priv(dev);
980 struct netdev_hw_addr *ha;
981 unsigned int i, bit, data, crc, tmp;
984 if (dev->flags & IFF_PROMISC) {
985 tmp = readl(fep->hwp + FEC_R_CNTRL);
987 writel(tmp, fep->hwp + FEC_R_CNTRL);
991 tmp = readl(fep->hwp + FEC_R_CNTRL);
993 writel(tmp, fep->hwp + FEC_R_CNTRL);
995 if (dev->flags & IFF_ALLMULTI) {
996 /* Catch all multicast addresses, so set the filter to all 1's. */
999 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1000 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1005 /* Clear the filter and add the addresses in the hash registers. */
1007 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1008 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1010 netdev_for_each_mc_addr(ha, dev) {
1011 /* Only support group multicast for now */
1012 if (!(ha->addr[0] & 1))
1015 /* calculate crc32 value of mac address */
1018 for (i = 0; i < dev->addr_len; i++) {
1020 for (bit = 0; bit < 8; bit++, data >>= 1) {
1022 (((crc ^ data) & 1) ? CRC32_POLY : 0);
1026 /* Only the upper 6 bits (HASH_BITS) are used,
1027 * which point to a specific bit in the hash registers. */
1029 hash = (crc >> (32 - HASH_BITS)) & 0x3f;
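/* Illustrative values: hash == 37 would set bit (37 - 32) = 5 of
 * FEC_GRP_HASH_TABLE_HIGH below, while hash == 12 would set bit 12 of
 * FEC_GRP_HASH_TABLE_LOW.
 */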
1032 tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1033 tmp |= 1 << (hash - 32);
1034 writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1036 tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1038 writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1043 /* Program a new MAC address into the hardware. */
1045 fec_set_mac_address(struct net_device *dev, void *p)
1047 struct fec_enet_private *fep = netdev_priv(dev);
1048 struct sockaddr *addr = p;
1050 if (!is_valid_ether_addr(addr->sa_data))
1051 return -EADDRNOTAVAIL;
1053 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1055 writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
1056 (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24),
1057 fep->hwp + FEC_ADDR_LOW);
1058 writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24),
1059 fep->hwp + FEC_ADDR_HIGH);
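/* The first four octets go into FEC_ADDR_LOW with dev_addr[0] in the most
 * significant byte; the last two occupy the upper halfword of FEC_ADDR_HIGH.
 */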
1063 static const struct net_device_ops fec_netdev_ops = {
1064 .ndo_open = fec_enet_open,
1065 .ndo_stop = fec_enet_close,
1066 .ndo_start_xmit = fec_enet_start_xmit,
1067 .ndo_set_multicast_list = set_multicast_list,
1068 .ndo_change_mtu = eth_change_mtu,
1069 .ndo_validate_addr = eth_validate_addr,
1070 .ndo_tx_timeout = fec_timeout,
1071 .ndo_set_mac_address = fec_set_mac_address,
1072 .ndo_do_ioctl = fec_enet_ioctl,
1076 * XXX: We need to clean up on failure exits here.
1079 static int fec_enet_init(struct net_device *dev)
1081 struct fec_enet_private *fep = netdev_priv(dev);
1082 struct bufdesc *cbd_base;
1083 struct bufdesc *bdp;
1086 /* Allocate memory for buffer descriptors. */
1087 cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
1090 printk("FEC: failed to allocate descriptor memory\n");
1094 spin_lock_init(&fep->hw_lock);
1096 fep->hwp = (void __iomem *)dev->base_addr;
1099 /* Get the Ethernet address */
1102 /* Set receive and transmit descriptor base. */
1103 fep->rx_bd_base = cbd_base;
1104 fep->tx_bd_base = cbd_base + RX_RING_SIZE;
1106 /* The FEC Ethernet specific entries in the device structure */
1107 dev->watchdog_timeo = TX_TIMEOUT;
1108 dev->netdev_ops = &fec_netdev_ops;
1109 dev->ethtool_ops = &fec_enet_ethtool_ops;
1111 /* Initialize the receive buffer descriptors. */
1112 bdp = fep->rx_bd_base;
1113 for (i = 0; i < RX_RING_SIZE; i++) {
1115 /* Initialize the BD for every fragment in the page. */
1120 /* Set the last buffer to wrap */
1122 bdp->cbd_sc |= BD_SC_WRAP;
1124 /* ...and the same for transmit */
1125 bdp = fep->tx_bd_base;
1126 for (i = 0; i < TX_RING_SIZE; i++) {
1128 /* Initialize the BD for every fragment in the page. */
1130 bdp->cbd_bufaddr = 0;
1134 /* Set the last buffer to wrap */
1136 bdp->cbd_sc |= BD_SC_WRAP;
1138 fec_restart(dev, 0);
1143 /* This function is called to start or restart the FEC during a link
1144 * change. This only happens when switching between half and full duplex. */
1148 fec_restart(struct net_device *dev, int duplex)
1150 struct fec_enet_private *fep = netdev_priv(dev);
1153 /* Whack a reset. We should wait for this. */
1154 writel(1, fep->hwp + FEC_ECNTRL);
1157 /* Clear any outstanding interrupt. */
1158 writel(0xffc00000, fep->hwp + FEC_IEVENT);
1160 /* Reset all multicast. */
1161 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1162 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1163 #ifndef CONFIG_M5272
1164 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
1165 writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
1168 /* Set maximum receive buffer size. */
1169 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
1171 /* Set receive and transmit descriptor base. */
1172 writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
1173 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
1174 fep->hwp + FEC_X_DES_START);
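/* The Rx ring occupies the first RX_RING_SIZE descriptors of the coherent
 * page allocated in fec_enet_init(); the Tx ring follows immediately after,
 * mirroring the rx_bd_base/tx_bd_base split.
 */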
1176 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
1177 fep->cur_rx = fep->rx_bd_base;
1179 /* Reset SKB transmit buffers. */
1180 fep->skb_cur = fep->skb_dirty = 0;
1181 for (i = 0; i <= TX_RING_MOD_MASK; i++) {
1182 if (fep->tx_skbuff[i]) {
1183 dev_kfree_skb_any(fep->tx_skbuff[i]);
1184 fep->tx_skbuff[i] = NULL;
1188 /* Enable MII mode */
1190 /* MII enable / FD enable */
1191 writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
1192 writel(0x04, fep->hwp + FEC_X_CNTRL);
1194 /* MII enable / No Rcv on Xmit */
1195 writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
1196 writel(0x0, fep->hwp + FEC_X_CNTRL);
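/* RCR bit 0x04 selects MII mode and bit 0x02 (DRT) suppresses reception
 * while transmitting (half duplex); TCR bit 0x04 (FDEN) enables full-duplex
 * operation.
 */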
1198 fep->full_duplex = duplex;
1201 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1203 #ifdef FEC_MIIGSK_ENR
1204 if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
1205 /* disable the gasket and wait */
1206 writel(0, fep->hwp + FEC_MIIGSK_ENR);
1207 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
1210 /* configure the gasket: RMII, 50 MHz, no loopback, no echo */
1211 writel(1, fep->hwp + FEC_MIIGSK_CFGR);
1213 /* re-enable the gasket */
1214 writel(2, fep->hwp + FEC_MIIGSK_ENR);
1218 /* And last, enable the transmit and receive processing */
1219 writel(2, fep->hwp + FEC_ECNTRL);
1220 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
1222 /* Enable interrupts we wish to service */
1223 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1227 fec_stop(struct net_device *dev)
1229 struct fec_enet_private *fep = netdev_priv(dev);
1231 /* We cannot expect a graceful transmit stop without link! */
1233 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
1235 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
1236 printk("fec_stop: Graceful transmit stop did not complete!\n");
1239 /* Whack a reset. We should wait for this. */
1240 writel(1, fep->hwp + FEC_ECNTRL);
1242 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1243 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1246 static int __devinit
1247 fec_probe(struct platform_device *pdev)
1249 struct fec_enet_private *fep;
1250 struct fec_platform_data *pdata;
1251 struct net_device *ndev;
1252 int i, irq, ret = 0;
1255 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1259 r = request_mem_region(r->start, resource_size(r), pdev->name);
1263 /* Init network device */
1264 ndev = alloc_etherdev(sizeof(struct fec_enet_private));
1268 SET_NETDEV_DEV(ndev, &pdev->dev);
1270 /* setup board info structure */
1271 fep = netdev_priv(ndev);
1272 memset(fep, 0, sizeof(*fep));
1274 ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));
1277 if (!ndev->base_addr) {
1279 goto failed_ioremap;
1282 platform_set_drvdata(pdev, ndev);
1284 pdata = pdev->dev.platform_data;
1286 fep->phy_interface = pdata->phy;
1288 /* This device has up to three IRQs on some platforms */
1289 for (i = 0; i < 3; i++) {
1290 irq = platform_get_irq(pdev, i);
1293 ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
1296 irq = platform_get_irq(pdev, i);
1297 free_irq(irq, ndev);
1304 fep->clk = clk_get(&pdev->dev, "fec_clk");
1305 if (IS_ERR(fep->clk)) {
1306 ret = PTR_ERR(fep->clk);
1309 clk_enable(fep->clk);
1311 ret = fec_enet_init(ndev);
1315 ret = fec_enet_mii_init(pdev);
1317 goto failed_mii_init;
1319 /* Carrier starts down, phylib will bring it up */
1320 netif_carrier_off(ndev);
1322 ret = register_netdev(ndev);
1324 goto failed_register;
1329 fec_enet_mii_remove(fep);
1332 clk_disable(fep->clk);
1335 for (i = 0; i < 3; i++) {
1336 irq = platform_get_irq(pdev, i);
1338 free_irq(irq, ndev);
1341 iounmap((void __iomem *)ndev->base_addr);
1348 static int __devexit
1349 fec_drv_remove(struct platform_device *pdev)
1351 struct net_device *ndev = platform_get_drvdata(pdev);
1352 struct fec_enet_private *fep = netdev_priv(ndev);
1354 platform_set_drvdata(pdev, NULL);
1357 fec_enet_mii_remove(fep);
1358 clk_disable(fep->clk);
1360 iounmap((void __iomem *)ndev->base_addr);
1361 unregister_netdev(ndev);
1368 fec_suspend(struct device *dev)
1370 struct net_device *ndev = dev_get_drvdata(dev);
1371 struct fec_enet_private *fep;
1374 fep = netdev_priv(ndev);
1375 if (netif_running(ndev)) {
1377 netif_device_detach(ndev);
1379 clk_disable(fep->clk);
1385 fec_resume(struct device *dev)
1387 struct net_device *ndev = dev_get_drvdata(dev);
1388 struct fec_enet_private *fep;
1391 fep = netdev_priv(ndev);
1392 clk_enable(fep->clk);
1393 if (netif_running(ndev)) {
1394 fec_restart(ndev, fep->full_duplex);
1395 netif_device_attach(ndev);
1401 static const struct dev_pm_ops fec_pm_ops = {
1402 .suspend = fec_suspend,
1403 .resume = fec_resume,
1404 .freeze = fec_suspend,
1406 .poweroff = fec_suspend,
1407 .restore = fec_resume,
1411 static struct platform_driver fec_driver = {
1414 .owner = THIS_MODULE,
1420 .remove = __devexit_p(fec_drv_remove),
1424 fec_enet_module_init(void)
1426 printk(KERN_INFO "FEC Ethernet Driver\n");
1428 return platform_driver_register(&fec_driver);
1432 fec_enet_cleanup(void)
1434 platform_driver_unregister(&fec_driver);
1437 module_exit(fec_enet_cleanup);
1438 module_init(fec_enet_module_init);
1440 MODULE_LICENSE("GPL");