2 * drivers/net/ibm_newemac/core.c
4 * Driver for PowerPC 4xx on-chip ethernet controller.
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
9 * Based on the arch/ppc version of the driver:
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
14 * Based on original work by
15 * Matt Porter <mporter@kernel.crashing.org>
16 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17 * Armin Kuster <akuster@mvista.com>
18 * Johnnie Peters <jpeters@mvista.com>
20 * This program is free software; you can redistribute it and/or modify it
21 * under the terms of the GNU General Public License as published by the
22 * Free Software Foundation; either version 2 of the License, or (at your
23 * option) any later version.
27 #include <linux/sched.h>
28 #include <linux/string.h>
29 #include <linux/errno.h>
30 #include <linux/delay.h>
31 #include <linux/types.h>
32 #include <linux/pci.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/crc32.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/bitops.h>
39 #include <linux/workqueue.h>
42 #include <asm/processor.h>
45 #include <asm/uaccess.h>
47 #include <asm/dcr-regs.h>
52 * Lack of dma_unmap_???? calls is intentional.
54 * API-correct usage requires additional support state information to be
55 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
56 * EMAC design (e.g. TX buffer passed from network stack can be split into
57 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
58 * maintaining such information will add additional overhead.
59 * Current DMA API implementation for 4xx processors only ensures cache coherency
60 * and dma_unmap_???? routines are empty and are likely to stay this way.
61 * I decided to omit dma_unmap_??? calls because I don't want to add additional
62 * complexity just for the sake of following some abstract API, when it doesn't
63 * add any real benefit to the driver. I understand that this decision maybe
64 * controversial, but I really tried to make code API-correct and efficient
65 * at the same time and didn't come up with code I liked :(. --ebs
68 #define DRV_NAME "emac"
69 #define DRV_VERSION "3.54"
70 #define DRV_DESC "PPC 4xx OCP EMAC driver"
72 MODULE_DESCRIPTION(DRV_DESC);
74 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
75 MODULE_LICENSE("GPL");
78 * PPC64 doesn't (yet) have a cacheable_memcpy
81 #define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
84 /* minimum number of free TX descriptors required to wake up TX process */
85 #define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
87 /* If packet size is less than this number, we allocate small skb and copy packet
88 * contents into it instead of just sending original big skb up
90 #define EMAC_RX_COPY_THRESH CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
92 /* Since multiple EMACs share MDIO lines in various ways, we need
93 * to avoid re-using the same PHY ID in cases where the arch didn't
94 * setup precise phy_map entries
96 * XXX This is something that needs to be reworked as we can have multiple
97 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
98 * probably require in that case to have explicit PHY IDs in the device-tree
100 static u32 busy_phy_map;
101 static DEFINE_MUTEX(emac_phy_map_lock);
103 /* This is the wait queue used to wait on any event related to probe, that
104 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
106 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
108 /* Having stable interface names is a doomed idea. However, it would be nice
109 * if we didn't have completely random interface names at boot too :-) It's
110 * just a matter of making everybody's life easier. Since we are doing
111 * threaded probing, it's a bit harder though. The base idea here is that
112 * we make up a list of all emacs in the device-tree before we register the
113 * driver. Every emac will then wait for the previous one in the list to
114 * initialize before itself. We should also keep that list ordered by
116 * That list is only 4 entries long, meaning that additional EMACs don't
117 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
120 #define EMAC_BOOT_LIST_SIZE 4
121 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
123 /* How long should I wait for dependent devices ? */
124 #define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
126 /* I don't want to litter system log with timeout errors
127 * when we have brain-damaged PHY.
/*
 * emac_report_timeout_error() - report a MAC/PHY timeout condition.
 * On parts carrying the 440GX/440EP PHY clock erratum the message is
 * demoted to a debug trace; otherwise it goes to the log, rate-limited.
 * NOTE(review): this excerpt is missing lines (the second parameter and
 * braces, at least) -- comments describe only the visible code.
 */
129 static inline void emac_report_timeout_error(struct emac_instance *dev,
132 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
133 EMAC_FTR_440EP_PHY_CLK_FIX))
134 DBG(dev, "%s" NL, error);
135 else if (net_ratelimit())
136 printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
139 /* EMAC PHY clock workaround:
140 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
141 * which allows controlling each EMAC clock
/* Set the per-cell ECS bit in SDR0_MFR (only with native DCR support),
 * switching this EMAC's RX clock for the 440EP erratum workaround. */
143 static inline void emac_rx_clk_tx(struct emac_instance *dev)
145 #ifdef CONFIG_PPC_DCR_NATIVE
146 if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
147 dcri_clrset(SDR0, SDR0_MFR,
148 0, SDR0_MFR_ECS >> dev->cell_index);
/* Clear the per-cell ECS bit again, restoring the default RX clock. */
152 static inline void emac_rx_clk_default(struct emac_instance *dev)
154 #ifdef CONFIG_PPC_DCR_NATIVE
155 if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
156 dcri_clrset(SDR0, SDR0_MFR,
157 SDR0_MFR_ECS >> dev->cell_index, 0);
161 /* PHY polling intervals */
162 #define PHY_POLL_LINK_ON HZ
163 #define PHY_POLL_LINK_OFF (HZ / 5)
165 /* Graceful stop timeouts in us.
166 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
168 #define STOP_TIMEOUT_10 1230
169 #define STOP_TIMEOUT_100 124
170 #define STOP_TIMEOUT_1000 13
171 #define STOP_TIMEOUT_1000_JUMBO 73
173 static unsigned char default_mcast_addr[] = {
174 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
177 /* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
178 static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
179 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
180 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
181 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
182 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
183 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
184 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
185 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
186 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
187 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
188 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
189 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
190 "tx_bd_excessive_collisions", "tx_bd_late_collision",
191 "tx_bd_multple_collisions", "tx_bd_single_collision",
192 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
196 static irqreturn_t emac_irq(int irq, void *dev_instance);
197 static void emac_clean_tx_ring(struct emac_instance *dev);
198 static void __emac_set_multicast_list(struct emac_instance *dev);
200 static inline int emac_phy_supports_gige(int phy_mode)
202 return phy_mode == PHY_MODE_GMII ||
203 phy_mode == PHY_MODE_RGMII ||
204 phy_mode == PHY_MODE_TBI ||
205 phy_mode == PHY_MODE_RTBI;
208 static inline int emac_phy_gpcs(int phy_mode)
210 return phy_mode == PHY_MODE_TBI ||
211 phy_mode == PHY_MODE_RTBI;
/* NOTE(review): lines are missing throughout this excerpt (braces,
 * udelay/counter loop bodies, early returns); comments below describe
 * only the code that is visible here. */
/* Set MR0.TXE to start the transmit MAC if it is not already running. */
214 static inline void emac_tx_enable(struct emac_instance *dev)
216 struct emac_regs __iomem *p = dev->emacp;
219 DBG(dev, "tx_enable" NL);
221 r = in_be32(&p->mr0);
222 if (!(r & EMAC_MR0_TXE))
223 out_be32(&p->mr0, r | EMAC_MR0_TXE);
/* Clear MR0.TXE, then poll MR0.TXI (bounded by dev->stop_timeout)
 * for the graceful-stop handshake before reporting a timeout. */
226 static void emac_tx_disable(struct emac_instance *dev)
228 struct emac_regs __iomem *p = dev->emacp;
231 DBG(dev, "tx_disable" NL);
233 r = in_be32(&p->mr0);
234 if (r & EMAC_MR0_TXE) {
235 int n = dev->stop_timeout;
236 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
237 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
242 emac_report_timeout_error(dev, "TX disable timeout");
/* Start the receive MAC; bails out while MAL has RX stopped, and waits
 * for a previous asynchronous disable (RXI not yet set) to finish. */
246 static void emac_rx_enable(struct emac_instance *dev)
248 struct emac_regs __iomem *p = dev->emacp;
251 if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
254 DBG(dev, "rx_enable" NL);
256 r = in_be32(&p->mr0);
257 if (!(r & EMAC_MR0_RXE)) {
258 if (unlikely(!(r & EMAC_MR0_RXI))) {
259 /* Wait if previous async disable is still in progress */
260 int n = dev->stop_timeout;
261 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
266 emac_report_timeout_error(dev,
267 "RX disable timeout");
269 out_be32(&p->mr0, r | EMAC_MR0_RXE);
/* Synchronous RX stop: clear MR0.RXE and poll MR0.RXI for completion. */
275 static void emac_rx_disable(struct emac_instance *dev)
277 struct emac_regs __iomem *p = dev->emacp;
280 DBG(dev, "rx_disable" NL);
282 r = in_be32(&p->mr0);
283 if (r & EMAC_MR0_RXE) {
284 int n = dev->stop_timeout;
285 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
286 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
291 emac_report_timeout_error(dev, "RX disable timeout");
/* Quiesce the stack-facing side: take/drop the TX lock as a barrier,
 * refresh trans_start to suppress spurious watchdogs, disable NAPI
 * polling via MAL, then stop the TX queue. */
295 static inline void emac_netif_stop(struct emac_instance *dev)
297 netif_tx_lock_bh(dev->ndev);
299 netif_tx_unlock_bh(dev->ndev);
300 dev->ndev->trans_start = jiffies; /* prevent tx timeout */
301 mal_poll_disable(dev->mal, &dev->commac);
302 netif_tx_disable(dev->ndev);
/* Counterpart of emac_netif_stop(): flush any deferred multicast
 * update under the TX lock, wake the queue, re-enable MAL polling. */
305 static inline void emac_netif_start(struct emac_instance *dev)
307 netif_tx_lock_bh(dev->ndev);
309 if (dev->mcast_pending && netif_running(dev->ndev))
310 __emac_set_multicast_list(dev);
311 netif_tx_unlock_bh(dev->ndev);
313 netif_wake_queue(dev->ndev);
315 /* NOTE: unconditional netif_wake_queue is only appropriate
316 * so long as all callers are assured to have free tx slots
317 * (taken from tg3... though the case where that is wrong is
318 * not terribly harmful)
320 mal_poll_enable(dev->mal, &dev->commac);
/* Fire-and-forget RX stop: clear MR0.RXE without waiting for RXI;
 * emac_rx_enable() handles the still-in-progress case later. */
323 static inline void emac_rx_disable_async(struct emac_instance *dev)
325 struct emac_regs __iomem *p = dev->emacp;
328 DBG(dev, "rx_disable_async" NL);
330 r = in_be32(&p->mr0);
331 if (r & EMAC_MR0_RXE)
332 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
/*
 * emac_reset() - soft-reset the EMAC core via MR0.SRST, polling for the
 * bit to self-clear.  RX/TX are stopped first unless a previous reset
 * already failed (reset_failed tracks that state for the retry path).
 * NOTE(review): excerpt is missing lines (loop body, timeout branch,
 * return statements); comments describe only the visible code.
 */
335 static int emac_reset(struct emac_instance *dev)
337 struct emac_regs __iomem *p = dev->emacp;
340 DBG(dev, "reset" NL);
342 if (!dev->reset_failed) {
343 /* 40x erratum suggests stopping RX channel before reset,
346 emac_rx_disable(dev);
347 emac_tx_disable(dev);
350 out_be32(&p->mr0, EMAC_MR0_SRST);
351 while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
355 dev->reset_failed = 0;
358 emac_report_timeout_error(dev, "reset timeout");
359 dev->reset_failed = 1;
/*
 * emac_hash_mc() - rebuild the group-address hash table (GAHT) from the
 * device multicast list: CRC each address to a slot, accumulate the
 * register image in gaht_temp, then write all GAHT registers at once.
 */
364 static void emac_hash_mc(struct emac_instance *dev)
366 const int regs = EMAC_XAHT_REGS(dev);
367 u32 *gaht_base = emac_gaht_base(dev);
369 struct dev_mc_list *dmi;
372 DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);
374 memset(gaht_temp, 0, sizeof (gaht_temp));
376 for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
378 DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
379 dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
380 dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);
382 slot = EMAC_XAHT_CRC_TO_SLOT(dev, ether_crc(ETH_ALEN, dmi->dmi_addr));
383 reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
384 mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
386 gaht_temp[reg] |= mask;
389 for (i = 0; i < regs; i++)
390 out_be32(gaht_base + i, gaht_temp[i]);
/*
 * emac_iff2rmr() - translate net_device interface flags (IFF_PROMISC,
 * IFF_ALLMULTI, pending multicast list) into an RMR register value.
 * NOTE(review): the branch bodies (the bits OR-ed in per flag) are
 * missing from this excerpt; only the conditions are visible.
 */
393 static inline u32 emac_iff2rmr(struct net_device *ndev)
395 struct emac_instance *dev = netdev_priv(ndev);
398 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
400 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
405 if (ndev->flags & IFF_PROMISC)
407 else if (ndev->flags & IFF_ALLMULTI ||
408 (ndev->mc_count > EMAC_XAHT_SLOTS(dev)))
410 else if (ndev->mc_count > 0)
/*
 * __emac_calc_base_mr1() - base MR1 for classic (non-EMAC4) cores;
 * encodes TX/RX FIFO sizes.  Switch arms for the size cases are missing
 * from this excerpt.
 * NOTE(review): the warning below prints "Rx FIFO" for the TX size --
 * a known copy-paste defect (fixed upstream to say "Tx FIFO"); cannot
 * be corrected in a comment-only pass.
 */
416 static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
418 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
420 DBG2(dev, "__emac_calc_base_mr1" NL);
424 ret |= EMAC_MR1_TFS_2K;
427 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
428 dev->ndev->name, tx_size);
433 ret |= EMAC_MR1_RFS_16K;
436 ret |= EMAC_MR1_RFS_4K;
439 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
440 dev->ndev->name, rx_size);
/*
 * __emac4_calc_base_mr1() - base MR1 for EMAC4 cores; also encodes the
 * OPB bus frequency (MHz) via EMAC4_MR1_OBCI.  Same "Rx FIFO" wording
 * defect for the tx_size warning as above.
 */
446 static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
448 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
449 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
451 DBG2(dev, "__emac4_calc_base_mr1" NL);
455 ret |= EMAC4_MR1_TFS_4K;
458 ret |= EMAC4_MR1_TFS_2K;
461 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
462 dev->ndev->name, tx_size);
467 ret |= EMAC4_MR1_RFS_16K;
470 ret |= EMAC4_MR1_RFS_4K;
473 ret |= EMAC4_MR1_RFS_2K;
476 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
477 dev->ndev->name, rx_size);
483 static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
485 return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
486 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
487 __emac_calc_base_mr1(dev, tx_size, rx_size);
490 static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
492 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
493 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
495 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
498 static inline u32 emac_calc_rwmr(struct emac_instance *dev,
499 unsigned int low, unsigned int high)
501 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
502 return (low << 22) | ( (high & 0x3ff) << 6);
504 return (low << 23) | ( (high & 0x1ff) << 7);
/*
 * emac_configure() - full MAC (re)configuration: reset, MR1 (duplex,
 * speed, FIFO sizes, flow control), MAC address, VLAN TPID, RX mode,
 * FIFO thresholds, water marks, PAUSE timer and interrupt enables.
 * Called with link state already determined (netif_carrier).
 * NOTE(review): many lines are missing from this excerpt (if-headers,
 * else arms, case labels, returns); comments describe only what is
 * visible and should be re-checked against the full source.
 */
507 static int emac_configure(struct emac_instance *dev)
509 struct emac_regs __iomem *p = dev->emacp;
510 struct net_device *ndev = dev->ndev;
511 int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
514 DBG(dev, "configure" NL);
/* Force full-duplex internal loopback in MR1 (condition not visible
 * here -- presumably a no-link/loopback path; confirm upstream). */
517 out_be32(&p->mr1, in_be32(&p->mr1)
518 | EMAC_MR1_FDE | EMAC_MR1_ILE);
520 } else if (emac_reset(dev) < 0)
523 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
524 tah_reset(dev->tah_dev);
526 DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
527 link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);
529 /* Default fifo sizes */
530 tx_size = dev->tx_fifo_size;
531 rx_size = dev->rx_fifo_size;
533 /* No link, force loopback */
535 mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;
537 /* Check for full duplex */
538 else if (dev->phy.duplex == DUPLEX_FULL)
539 mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
541 /* Adjust fifo sizes, mr1 and timeouts based on link speed */
542 dev->stop_timeout = STOP_TIMEOUT_10;
543 switch (dev->phy.speed) {
545 if (emac_phy_gpcs(dev->phy.mode)) {
546 mr1 |= EMAC_MR1_MF_1000GPCS |
547 EMAC_MR1_MF_IPPA(dev->phy.address);
549 /* Put some arbitrary OUI, Manuf & Rev IDs so we can
550 * identify this GPCS PHY later.
552 out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
554 mr1 |= EMAC_MR1_MF_1000;
556 /* Extended fifo sizes */
557 tx_size = dev->tx_fifo_size_gige;
558 rx_size = dev->rx_fifo_size_gige;
560 if (dev->ndev->mtu > ETH_DATA_LEN) {
561 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
562 mr1 |= EMAC4_MR1_JPSM;
564 mr1 |= EMAC_MR1_JPSM;
565 dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
567 dev->stop_timeout = STOP_TIMEOUT_1000;
570 mr1 |= EMAC_MR1_MF_100;
571 dev->stop_timeout = STOP_TIMEOUT_100;
573 default: /* make gcc happy */
/* Propagate negotiated speed to the attached RGMII/ZMII bridge. */
577 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
578 rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
580 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
581 zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);
583 /* on 40x erratum forces us to NOT use integrated flow control,
584 * let's hope it works on 44x ;)
586 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
587 dev->phy.duplex == DUPLEX_FULL) {
589 mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
590 else if (dev->phy.asym_pause)
594 /* Add base settings & fifo sizes & program MR1 */
595 mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
596 out_be32(&p->mr1, mr1);
598 /* Set individual MAC address */
599 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
600 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
601 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
604 /* VLAN Tag Protocol ID */
605 out_be32(&p->vtpid, 0x8100);
607 /* Receive mode register */
608 r = emac_iff2rmr(ndev);
609 if (r & EMAC_RMR_MAE)
611 out_be32(&p->rmr, r);
613 /* FIFOs thresholds */
614 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
615 r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
616 tx_size / 2 / dev->fifo_entry_size);
618 r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
619 tx_size / 2 / dev->fifo_entry_size);
620 out_be32(&p->tmr1, r);
621 out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
623 /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
624 there should be still enough space in FIFO to allow the our link
625 partner time to process this frame and also time to send PAUSE
628 Here is the worst case scenario for the RX FIFO "headroom"
629 (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
631 1) One maximum-length frame on TX 1522 bytes
632 2) One PAUSE frame time 64 bytes
633 3) PAUSE frame decode time allowance 64 bytes
634 4) One maximum-length frame on RX 1522 bytes
635 5) Round-trip propagation delay of the link (100Mb) 15 bytes
639 I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
640 low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
642 r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
643 rx_size / 4 / dev->fifo_entry_size);
644 out_be32(&p->rwmr, r);
646 /* Set PAUSE timer to the maximum */
647 out_be32(&p->ptr, 0xffff);
/* Enable error interrupts; EMAC4 adds TX/RX parity error sources. */
650 r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
651 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
652 EMAC_ISR_IRE | EMAC_ISR_TE;
653 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
654 r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
656 out_be32(&p->iser, r);
658 /* We need to take GPCS PHY out of isolate mode after EMAC reset */
659 if (emac_phy_gpcs(dev->phy.mode))
660 emac_mii_reset_phy(&dev->phy);
662 /* Required for Pause packet support in EMAC */
663 dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);
/* Stop the netif side, reconfigure the MAC, and restart.
 * NOTE(review): lines missing between the visible ones (brace/else of
 * the emac_configure() result check, tx/rx re-enable calls). */
668 static void emac_reinitialize(struct emac_instance *dev)
670 DBG(dev, "reinitialize" NL);
672 emac_netif_stop(dev);
673 if (!emac_configure(dev)) {
677 emac_netif_start(dev);
/* Tear down and restart the whole TX path: disable MAC TX and the MAL
 * channel, drop all queued skbs, reset the ring indices, re-enable.
 * (Reconfigure/enable steps between index reset and channel enable are
 * missing from this excerpt.) */
680 static void emac_full_tx_reset(struct emac_instance *dev)
682 DBG(dev, "full_tx_reset" NL);
684 emac_tx_disable(dev);
685 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
686 emac_clean_tx_ring(dev);
687 dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
691 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
/* Workqueue handler behind emac_tx_timeout(): performs the full TX
 * reset under link_lock, with the netif quiesced around it. */
696 static void emac_reset_work(struct work_struct *work)
698 struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
700 DBG(dev, "reset_work" NL);
702 mutex_lock(&dev->link_lock);
704 emac_netif_stop(dev);
705 emac_full_tx_reset(dev);
706 emac_netif_start(dev);
708 mutex_unlock(&dev->link_lock);
/* net_device watchdog hook: defer the recovery to process context. */
711 static void emac_tx_timeout(struct net_device *ndev)
713 struct emac_instance *dev = netdev_priv(ndev);
715 DBG(dev, "tx_timeout" NL);
717 schedule_work(&dev->reset_work);
/* Test the STACR "operation complete" bit, honouring the inverted-
 * polarity quirk on some cores.  (The invert and return statements are
 * missing from this excerpt.) */
721 static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
723 int done = !!(stacr & EMAC_STACR_OC);
725 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
/*
 * __emac_mdio_read() - read one PHY register over the shared MDIO bus.
 * Serialized by mdio_lock; claims the ZMII/RGMII MDIO port, waits for
 * the STACR interface to go idle, issues the read, waits for
 * completion, extracts the data field, then releases the port.
 * Returns the 16-bit value on success or a negative errno (-ETIMEDOUT
 * on handshake timeout).
 * NOTE(review): poll-loop bodies, error labels and some branches are
 * missing from this excerpt.
 */
731 static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
733 struct emac_regs __iomem *p = dev->emacp;
735 int n, err = -ETIMEDOUT;
737 mutex_lock(&dev->mdio_lock);
739 DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);
741 /* Enable proper MDIO port */
742 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
743 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
744 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
745 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
747 /* Wait for management interface to become idle */
749 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
752 DBG2(dev, " -> timeout wait idle\n");
757 /* Issue read command */
758 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
759 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
761 r = EMAC_STACR_BASE(dev->opb_bus_freq);
762 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
764 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
765 r |= EMACX_STACR_STAC_READ;
767 r |= EMAC_STACR_STAC_READ;
768 r |= (reg & EMAC_STACR_PRA_MASK)
769 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
770 out_be32(&p->stacr, r);
772 /* Wait for read to complete */
774 while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
777 DBG2(dev, " -> timeout wait complete\n");
/* PHY signalled an error for this transaction. */
782 if (unlikely(r & EMAC_STACR_PHYE)) {
783 DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
788 r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
790 DBG2(dev, "mdio_read -> %04x" NL, r);
793 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
794 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
795 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
796 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
797 mutex_unlock(&dev->mdio_lock);
799 return err == 0 ? r : err;
/*
 * __emac_mdio_write() - write one PHY register; same structure and
 * locking as __emac_mdio_read(), with the data in the STACR PHYD field.
 */
802 static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
805 struct emac_regs __iomem *p = dev->emacp;
807 int n, err = -ETIMEDOUT;
809 mutex_lock(&dev->mdio_lock);
811 DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);
813 /* Enable proper MDIO port */
814 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
815 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
816 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
817 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
819 /* Wait for management interface to be idle */
821 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
824 DBG2(dev, " -> timeout wait idle\n");
829 /* Issue write command */
830 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
831 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
833 r = EMAC_STACR_BASE(dev->opb_bus_freq);
834 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
836 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
837 r |= EMACX_STACR_STAC_WRITE;
839 r |= EMAC_STACR_STAC_WRITE;
840 r |= (reg & EMAC_STACR_PRA_MASK) |
841 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
842 (val << EMAC_STACR_PHYD_SHIFT);
843 out_be32(&p->stacr, r);
845 /* Wait for write to complete */
847 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
850 DBG2(dev, " -> timeout wait complete\n");
856 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
857 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
858 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
859 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
860 mutex_unlock(&dev->mdio_lock);
/* mii_if_info hooks: route reads/writes through the designated MDIO
 * owner (mdio_instance) when several EMACs share one bus. */
863 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
865 struct emac_instance *dev = netdev_priv(ndev);
868 res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
873 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
875 struct emac_instance *dev = netdev_priv(ndev);
877 __emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
878 (u8) id, (u8) reg, (u16) val);
/*
 * __emac_set_multicast_list() - apply the current RX filter: stop the
 * RX channel, rebuild the hash table when multicast is enabled, write
 * the new RMR.  Avoids a full EMAC reset (see rationale below).
 * NOTE(review): the hash-table rebuild call and RX re-enable are among
 * the lines missing from this excerpt.
 */
882 static void __emac_set_multicast_list(struct emac_instance *dev)
884 struct emac_regs __iomem *p = dev->emacp;
885 u32 rmr = emac_iff2rmr(dev->ndev);
887 DBG(dev, "__multicast %08x" NL, rmr);
889 /* I decided to relax register access rules here to avoid
892 * There is a real problem with EMAC4 core if we use MWSW_001 bit
893 * in MR1 register and do a full EMAC reset.
894 * One TX BD status update is delayed and, after EMAC reset, it
895 * never happens, resulting in TX hung (it'll be recovered by TX
896 * timeout handler eventually, but this is just gross).
897 * So we either have to do full TX reset or try to cheat here :)
899 * The only required change is to RX mode register, so I *think* all
900 * we need is just to stop RX channel. This seems to work on all
903 * If we need the full reset, we might just trigger the workqueue
904 * and do it async... a bit nasty but should work --BenH
906 dev->mcast_pending = 0;
907 emac_rx_disable(dev);
908 if (rmr & EMAC_RMR_MAE)
910 out_be32(&p->rmr, rmr);
/* ndo_set_multicast_list hook: runs in atomic context, so when the
 * update cannot be applied immediately it is flagged as pending and
 * picked up by emac_netif_start(). */
915 static void emac_set_multicast_list(struct net_device *ndev)
917 struct emac_instance *dev = netdev_priv(ndev);
919 DBG(dev, "multicast" NL);
921 BUG_ON(!netif_running(dev->ndev));
924 dev->mcast_pending = 1;
927 __emac_set_multicast_list(dev);
/*
 * emac_resize_rx_ring() - rebuild the RX ring for a new MTU: quiesce
 * RX, drop any partial scatter-gather skb, reset all BDs, reallocate
 * skbs only when bigger buffers are needed, toggle the jumbo bit via a
 * full TX reset when crossing ETH_DATA_LEN, then restart everything.
 * NOTE(review): error-handling paths, some braces and the return are
 * missing from this excerpt.
 */
930 static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
932 int rx_sync_size = emac_rx_sync_size(new_mtu);
933 int rx_skb_size = emac_rx_skb_size(new_mtu);
936 mutex_lock(&dev->link_lock);
937 emac_netif_stop(dev);
938 emac_rx_disable(dev);
939 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
941 if (dev->rx_sg_skb) {
942 ++dev->estats.rx_dropped_resize;
943 dev_kfree_skb(dev->rx_sg_skb);
944 dev->rx_sg_skb = NULL;
947 /* Make a first pass over RX ring and mark BDs ready, dropping
948 * non-processed packets on the way. We need this as a separate pass
949 * to simplify error recovery in the case of allocation failure later.
951 for (i = 0; i < NUM_RX_BUFF; ++i) {
952 if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
953 ++dev->estats.rx_dropped_resize;
955 dev->rx_desc[i].data_len = 0;
956 dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
957 (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
960 /* Reallocate RX ring only if bigger skb buffers are required */
961 if (rx_skb_size <= dev->rx_skb_size)
964 /* Second pass, allocate new skbs */
965 for (i = 0; i < NUM_RX_BUFF; ++i) {
966 struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
972 BUG_ON(!dev->rx_skb[i]);
973 dev_kfree_skb(dev->rx_skb[i]);
/* +2 keeps the IP header word-aligned; the DMA address is adjusted
 * by the same offset. */
975 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
976 dev->rx_desc[i].data_ptr =
977 dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
978 DMA_FROM_DEVICE) + 2;
979 dev->rx_skb[i] = skb;
982 /* Check if we need to change "Jumbo" bit in MR1 */
983 if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
984 /* This is to prevent starting RX channel in emac_rx_enable() */
985 set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
987 dev->ndev->mtu = new_mtu;
988 emac_full_tx_reset(dev);
991 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
994 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
996 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
998 emac_netif_start(dev);
999 mutex_unlock(&dev->link_lock);
1004 /* Process ctx, rtnl_lock semaphore */
/* ndo_change_mtu hook: validates the range, resizes the RX ring only
 * when the skb size class actually changes, then records the new MTU
 * and derived buffer sizes. */
1005 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1007 struct emac_instance *dev = netdev_priv(ndev);
1010 if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
1013 DBG(dev, "change_mtu(%d)" NL, new_mtu);
1015 if (netif_running(ndev)) {
1016 /* Check if we really need to reinitalize RX ring */
1017 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1018 ret = emac_resize_rx_ring(dev, new_mtu);
1022 ndev->mtu = new_mtu;
1023 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1024 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
/* Free every queued TX skb and zero the descriptors; skbs whose BD was
 * still READY (never transmitted) are counted as tx_dropped.
 * NOTE(review): braces/blank lines are missing throughout this block
 * of four helpers; comments describe only the visible code. */
1030 static void emac_clean_tx_ring(struct emac_instance *dev)
1034 for (i = 0; i < NUM_TX_BUFF; ++i) {
1035 if (dev->tx_skb[i]) {
1036 dev_kfree_skb(dev->tx_skb[i]);
1037 dev->tx_skb[i] = NULL;
1038 if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1039 ++dev->estats.tx_dropped;
1041 dev->tx_desc[i].ctrl = 0;
1042 dev->tx_desc[i].data_ptr = 0;
/* Free every RX skb (including a partial scatter-gather one) and zero
 * the descriptors. */
1046 static void emac_clean_rx_ring(struct emac_instance *dev)
1050 for (i = 0; i < NUM_RX_BUFF; ++i)
1051 if (dev->rx_skb[i]) {
1052 dev->rx_desc[i].ctrl = 0;
1053 dev_kfree_skb(dev->rx_skb[i]);
1054 dev->rx_skb[i] = NULL;
1055 dev->rx_desc[i].data_ptr = 0;
1058 if (dev->rx_sg_skb) {
1059 dev_kfree_skb(dev->rx_sg_skb);
1060 dev->rx_sg_skb = NULL;
/* Allocate and DMA-map one RX skb for ring slot "slot"; the +2 offset
 * keeps the IP header word-aligned.  Marks the BD EMPTY (with WRAP on
 * the last slot) so the MAL can use it. */
1064 static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
1067 struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
1071 dev->rx_skb[slot] = skb;
1072 dev->rx_desc[slot].data_len = 0;
1074 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
1075 dev->rx_desc[slot].data_ptr =
1076 dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
1077 DMA_FROM_DEVICE) + 2;
1079 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1080 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
/* Log the current carrier state, speed, duplex and pause mode. */
1085 static void emac_print_link_status(struct emac_instance *dev)
1087 if (netif_carrier_ok(dev->ndev))
1088 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1089 dev->ndev->name, dev->phy.speed,
1090 dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1091 dev->phy.pause ? ", pause enabled" :
1092 dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1094 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1097 /* Process ctx, rtnl_lock semaphore */
/*
 * emac_open() - ndo_open: request the error IRQ, populate the RX ring,
 * reset ring state, start PHY link polling (or force carrier on for a
 * PHY-less setup), configure the MAC and bring up MAL channels and the
 * netif.  Unwinds IRQ/ring on failure.
 * NOTE(review): error labels, some else arms and the returns are
 * missing from this excerpt.
 */
1098 static int emac_open(struct net_device *ndev)
1100 struct emac_instance *dev = netdev_priv(ndev);
1103 DBG(dev, "open" NL);
1105 /* Setup error IRQ handler */
1106 err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
1108 printk(KERN_ERR "%s: failed to request IRQ %d\n",
1109 ndev->name, dev->emac_irq);
1113 /* Allocate RX ring */
1114 for (i = 0; i < NUM_RX_BUFF; ++i)
1115 if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
1116 printk(KERN_ERR "%s: failed to allocate RX ring\n",
1121 dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
1122 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1123 dev->rx_sg_skb = NULL;
1125 mutex_lock(&dev->link_lock);
1128 /* Start PHY polling now.
/* phy.address < 0 means no PHY attached; carrier is forced on below. */
1130 if (dev->phy.address >= 0) {
1131 int link_poll_interval;
1132 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1133 dev->phy.def->ops->read_link(&dev->phy);
1134 emac_rx_clk_default(dev);
1135 netif_carrier_on(dev->ndev);
1136 link_poll_interval = PHY_POLL_LINK_ON;
1138 emac_rx_clk_tx(dev);
1139 netif_carrier_off(dev->ndev);
1140 link_poll_interval = PHY_POLL_LINK_OFF;
1142 dev->link_polling = 1;
1144 schedule_delayed_work(&dev->link_work, link_poll_interval);
1145 emac_print_link_status(dev);
1147 netif_carrier_on(dev->ndev);
1149 emac_configure(dev);
1150 mal_poll_add(dev->mal, &dev->commac);
1151 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
1152 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
1153 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1154 emac_tx_enable(dev);
1155 emac_rx_enable(dev);
1156 emac_netif_start(dev);
1158 mutex_unlock(&dev->link_lock);
/* Error unwind path: release the partially-built RX ring and the IRQ. */
1162 emac_clean_rx_ring(dev);
1163 free_irq(dev->emac_irq, dev);
/*
 * emac_link_differs() - compare the speed/duplex/pause currently
 * programmed in MR1 against the PHY-reported state; non-zero when the
 * MAC needs reconfiguring.  (Several case/assignment lines missing.)
 */
1170 static int emac_link_differs(struct emac_instance *dev)
1172 u32 r = in_be32(&dev->emacp->mr1);
1174 int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
1175 int speed, pause, asym_pause;
1177 if (r & EMAC_MR1_MF_1000)
1179 else if (r & EMAC_MR1_MF_100)
1184 switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
1185 case (EMAC_MR1_EIFC | EMAC_MR1_APP):
1194 pause = asym_pause = 0;
1196 return speed != dev->phy.speed || duplex != dev->phy.duplex ||
1197 pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
/*
 * emac_link_timer() - delayed-work PHY poller.  On a link-up
 * transition it reads the new link parameters and does a full TX reset
 * to reprogram the MAC; on link-down it switches the RX clock
 * workaround, stops TX and reinitializes.  Reschedules itself at a
 * faster rate while the link is down.
 * NOTE(review): an early-exit (link_polling check) and some braces are
 * missing from this excerpt.
 */
1201 static void emac_link_timer(struct work_struct *work)
1203 struct emac_instance *dev =
1204 container_of((struct delayed_work *)work,
1205 struct emac_instance, link_work);
1206 int link_poll_interval;
1208 mutex_lock(&dev->link_lock);
1209 DBG2(dev, "link timer" NL);
1214 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1215 if (!netif_carrier_ok(dev->ndev)) {
1216 emac_rx_clk_default(dev);
1217 /* Get new link parameters */
1218 dev->phy.def->ops->read_link(&dev->phy);
1220 netif_carrier_on(dev->ndev);
1221 emac_netif_stop(dev);
1222 emac_full_tx_reset(dev);
1223 emac_netif_start(dev);
1224 emac_print_link_status(dev);
1226 link_poll_interval = PHY_POLL_LINK_ON;
1228 if (netif_carrier_ok(dev->ndev)) {
1229 emac_rx_clk_tx(dev);
1230 netif_carrier_off(dev->ndev);
1231 netif_tx_disable(dev->ndev);
1232 emac_reinitialize(dev);
1233 emac_print_link_status(dev);
1235 link_poll_interval = PHY_POLL_LINK_OFF;
1237 schedule_delayed_work(&dev->link_work, link_poll_interval);
1239 mutex_unlock(&dev->link_lock);
/* Drop carrier and, if polling is active, cancel and promptly
 * reschedule the link work so the state is re-evaluated soon. */
1242 static void emac_force_link_update(struct emac_instance *dev)
1244 netif_carrier_off(dev->ndev);
1246 if (dev->link_polling) {
1247 cancel_rearming_delayed_work(&dev->link_work);
1248 if (dev->link_polling)
1249 schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
1253 /* Process ctx, rtnl_lock semaphore */
/*
 * net_device close (ndo_stop) path: stop link polling, halt the
 * netif queue, disable EMAC RX/TX and the MAL channels, drop out of
 * MAL polling, clean both rings and release the interrupt.
 * NOTE(review): numbered listing; intervening lines are elided.
 */
1254 static int emac_close(struct net_device *ndev)
1256 struct emac_instance *dev = netdev_priv(ndev);
1258 DBG(dev, "close" NL);
/* Only stop the poller if a PHY is attached (address >= 0). */
1260 if (dev->phy.address >= 0) {
1261 dev->link_polling = 0;
1262 cancel_rearming_delayed_work(&dev->link_work);
1264 mutex_lock(&dev->link_lock);
1265 emac_netif_stop(dev);
1267 mutex_unlock(&dev->link_lock);
1269 emac_rx_disable(dev);
1270 emac_tx_disable(dev);
1271 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
1272 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
1273 mal_poll_del(dev->mal, &dev->commac);
1275 emac_clean_tx_ring(dev);
1276 emac_clean_rx_ring(dev);
1278 free_irq(dev->emac_irq, dev);
/*
 * Return the descriptor control bit requesting TAH hardware checksum
 * for this skb, or (presumably, elided) 0 when TAH is absent or the
 * skb does not need partial checksum offload. Also counts the
 * offloaded packet in the driver stats.
 */
1283 static inline u16 emac_tx_csum(struct emac_instance *dev,
1284 struct sk_buff *skb)
1286 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1287 (skb->ip_summed == CHECKSUM_PARTIAL)) {
1288 ++dev->stats.tx_packets_csum;
1289 return EMAC_TX_CTRL_TAH_CSUM;
/*
 * Common tail of the transmit paths: kick the EMAC transmitter via
 * TMR0 (EMAC4 variants use a different kick value), stop the queue
 * when the TX ring becomes full, and account the transmitted packet.
 * NOTE(review): numbered listing; return statement is elided.
 */
1294 static inline int emac_xmit_finish(struct emac_instance *dev, int len)
1296 struct emac_regs __iomem *p = dev->emacp;
1297 struct net_device *ndev = dev->ndev;
1299 /* Send the packet out. If the if makes a significant perf
1300 * difference, then we can store the TMR0 value in "dev"
1303 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1304 out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
1306 out_be32(&p->tmr0, EMAC_TMR0_XMIT);
/* Ring full after this packet: pause the stack's TX queue. */
1308 if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1309 netif_stop_queue(ndev);
1310 DBG2(dev, "stopped TX queue" NL);
1313 ndev->trans_start = jiffies;
1314 ++dev->stats.tx_packets;
1315 dev->stats.tx_bytes += len;
/*
 * Simple (non-scatter-gather) hard_start_xmit: map the linear skb
 * into a single TX descriptor, set the control bits (wrapping at the
 * end of the ring) and hand off to emac_xmit_finish().
 * NOTE(review): numbered listing; intervening lines are elided.
 */
1321 static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1323 struct emac_instance *dev = netdev_priv(ndev);
1324 unsigned int len = skb->len;
1327 u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1328 MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
1330 slot = dev->tx_slot++;
/* Wrap the producer index and mark the descriptor accordingly. */
1331 if (dev->tx_slot == NUM_TX_BUFF) {
1333 ctrl |= MAL_TX_CTRL_WRAP;
1336 DBG2(dev, "xmit(%u) %d" NL, len, slot);
1338 dev->tx_skb[slot] = skb;
1339 dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
1342 dev->tx_desc[slot].data_len = (u16) len;
1344 dev->tx_desc[slot].ctrl = ctrl;
1346 return emac_xmit_finish(dev, len);
/*
 * Helper for the SG transmit path: fill consecutive TX descriptors
 * with up-to-MAL_MAX_TX_SIZE chunks of a DMA region starting at pd.
 * Sets LAST on the final chunk (when 'last' is set) and WRAP on the
 * ring's final slot. Returns the last slot used (return is elided).
 */
1349 static inline int emac_xmit_split(struct emac_instance *dev, int slot,
1350 u32 pd, int len, int last, u16 base_ctrl)
1353 u16 ctrl = base_ctrl;
1354 int chunk = min(len, MAL_MAX_TX_SIZE);
1357 slot = (slot + 1) % NUM_TX_BUFF;
1360 ctrl |= MAL_TX_CTRL_LAST;
1361 if (slot == NUM_TX_BUFF - 1)
1362 ctrl |= MAL_TX_CTRL_WRAP;
/* Intermediate chunks own no skb; the caller attaches the skb
 * to the last descriptor so it is freed only once. */
1364 dev->tx_skb[slot] = NULL;
1365 dev->tx_desc[slot].data_ptr = pd;
1366 dev->tx_desc[slot].data_len = (u16) chunk;
1367 dev->tx_desc[slot].ctrl = ctrl;
1378 /* Tx lock BH disabled (SG version for TAH equipped EMACs) */
/*
 * Scatter-gather hard_start_xmit used when TAH is present: maps the
 * linear part and each page fragment, splitting anything larger than
 * MAL_MAX_TX_SIZE via emac_xmit_split(). If the optimistic slot
 * estimate turns out wrong mid-way, the undo path clears the filled
 * descriptors and stops the queue.
 * NOTE(review): numbered listing; intervening lines are elided.
 */
1379 static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
1381 struct emac_instance *dev = netdev_priv(ndev);
1382 int nr_frags = skb_shinfo(skb)->nr_frags;
1383 int len = skb->len, chunk;
1388 /* This is common "fast" path */
1389 if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
1390 return emac_start_xmit(skb, ndev);
1392 len -= skb->data_len;
1394 /* Note, this is only an *estimation*, we can still run out of empty
1395 * slots because of the additional fragmentation into
1396 * MAL_MAX_TX_SIZE-sized chunks
1398 if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
1401 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1402 emac_tx_csum(dev, skb);
1403 slot = dev->tx_slot;
/* Map and split the linear (header) part of the skb. */
1406 dev->tx_skb[slot] = NULL;
1407 chunk = min(len, MAL_MAX_TX_SIZE);
1408 dev->tx_desc[slot].data_ptr = pd =
1409 dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
1410 dev->tx_desc[slot].data_len = (u16) chunk;
1413 slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
/* Then map and split each page fragment. */
1416 for (i = 0; i < nr_frags; ++i) {
1417 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1420 if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
1423 pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
1426 slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
1430 DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);
1432 /* Attach skb to the last slot so we don't release it too early */
1433 dev->tx_skb[slot] = skb;
1435 /* Send the packet out */
1436 if (dev->tx_slot == NUM_TX_BUFF - 1)
1437 ctrl |= MAL_TX_CTRL_WRAP;
/* Only now make the first descriptor READY (elided: a wmb()-like
 * ordering presumably applies here — TODO confirm against full file). */
1439 dev->tx_desc[dev->tx_slot].ctrl = ctrl;
1440 dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
1442 return emac_xmit_finish(dev, skb->len);
1445 /* Well, too bad. Our previous estimation was overly optimistic.
/* Undo path: walk back over the descriptors already filled. */
1448 while (slot != dev->tx_slot) {
1449 dev->tx_desc[slot].ctrl = 0;
1452 slot = NUM_TX_BUFF - 1;
1454 ++dev->estats.tx_undo;
1457 netif_stop_queue(ndev);
1458 DBG2(dev, "stopped TX queue" NL);
/*
 * Decode a bad TX buffer-descriptor status word and bump the matching
 * per-error counters in dev->estats.
 */
1463 static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1465 struct emac_error_stats *st = &dev->estats;
1467 DBG(dev, "BD TX error %04x" NL, ctrl);
1470 if (ctrl & EMAC_TX_ST_BFCS)
1471 ++st->tx_bd_bad_fcs;
1472 if (ctrl & EMAC_TX_ST_LCS)
1473 ++st->tx_bd_carrier_loss;
1474 if (ctrl & EMAC_TX_ST_ED)
1475 ++st->tx_bd_excessive_deferral;
1476 if (ctrl & EMAC_TX_ST_EC)
1477 ++st->tx_bd_excessive_collisions;
1478 if (ctrl & EMAC_TX_ST_LC)
1479 ++st->tx_bd_late_collision;
1480 if (ctrl & EMAC_TX_ST_MC)
1481 ++st->tx_bd_multple_collisions;
1482 if (ctrl & EMAC_TX_ST_SC)
1483 ++st->tx_bd_single_collision;
1484 if (ctrl & EMAC_TX_ST_UR)
1485 ++st->tx_bd_underrun;
1486 if (ctrl & EMAC_TX_ST_SQE)
/*
 * MAL poll_tx callback: under the netif TX lock (BH disabled),
 * reclaim completed TX descriptors starting at dev->ack_slot, free
 * their skbs, account BD errors, and wake the queue once enough
 * slots are free. The bad-status mask differs with/without TAH.
 * NOTE(review): numbered listing; intervening lines are elided.
 */
1490 static void emac_poll_tx(void *param)
1492 struct emac_instance *dev = param;
1495 DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);
1497 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
1498 bad_mask = EMAC_IS_BAD_TX_TAH;
1500 bad_mask = EMAC_IS_BAD_TX;
1502 netif_tx_lock_bh(dev->ndev);
1505 int slot = dev->ack_slot, n = 0;
1507 ctrl = dev->tx_desc[slot].ctrl;
/* READY cleared means the hardware is done with this descriptor. */
1508 if (!(ctrl & MAL_TX_CTRL_READY)) {
1509 struct sk_buff *skb = dev->tx_skb[slot];
1514 dev->tx_skb[slot] = NULL;
1516 slot = (slot + 1) % NUM_TX_BUFF;
1518 if (unlikely(ctrl & bad_mask))
1519 emac_parse_tx_error(dev, ctrl);
1525 dev->ack_slot = slot;
1526 if (netif_queue_stopped(dev->ndev) &&
1527 dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
1528 netif_wake_queue(dev->ndev);
1530 DBG2(dev, "tx %d pkts" NL, n);
1533 netif_tx_unlock_bh(dev->ndev);
/*
 * Re-arm an RX descriptor with its existing skb: re-map the buffer
 * for DMA (the "- 2" / "+ 2" offsets keep the IP header aligned) and
 * mark the descriptor EMPTY, wrapping on the last ring slot.
 */
1536 static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
1539 struct sk_buff *skb = dev->rx_skb[slot];
1541 DBG2(dev, "recycle %d %d" NL, slot, len);
1544 dma_map_single(&dev->ofdev->dev, skb->data - 2,
1545 EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
1547 dev->rx_desc[slot].data_len = 0;
1549 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1550 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
/*
 * Decode a bad RX buffer-descriptor status word and bump the matching
 * per-error counters in dev->estats.
 */
1553 static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1555 struct emac_error_stats *st = &dev->estats;
1557 DBG(dev, "BD RX error %04x" NL, ctrl);
1560 if (ctrl & EMAC_RX_ST_OE)
1561 ++st->rx_bd_overrun;
1562 if (ctrl & EMAC_RX_ST_BP)
1563 ++st->rx_bd_bad_packet;
1564 if (ctrl & EMAC_RX_ST_RP)
1565 ++st->rx_bd_runt_packet;
1566 if (ctrl & EMAC_RX_ST_SE)
1567 ++st->rx_bd_short_event;
1568 if (ctrl & EMAC_RX_ST_AE)
1569 ++st->rx_bd_alignment_error;
1570 if (ctrl & EMAC_RX_ST_BFCS)
1571 ++st->rx_bd_bad_fcs;
1572 if (ctrl & EMAC_RX_ST_PTL)
1573 ++st->rx_bd_packet_too_long;
1574 if (ctrl & EMAC_RX_ST_ORE)
1575 ++st->rx_bd_out_of_range;
1576 if (ctrl & EMAC_RX_ST_IRE)
1577 ++st->rx_bd_in_range;
/*
 * If TAH checksum offload is compiled in and the descriptor reported
 * no checksum error (ctrl == 0), mark the skb CHECKSUM_UNNECESSARY
 * and count the offloaded packet.
 */
1580 static inline void emac_rx_csum(struct emac_instance *dev,
1581 struct sk_buff *skb, u16 ctrl)
1583 #ifdef CONFIG_IBM_NEW_EMAC_TAH
1584 if (!ctrl && dev->tah_dev) {
1585 skb->ip_summed = CHECKSUM_UNNECESSARY;
1586 ++dev->stats.rx_packets_csum;
/*
 * Append the data of RX slot to the in-progress scatter-gather skb
 * (dev->rx_sg_skb). Drops the whole packet (rx_dropped_mtu) if it
 * would exceed the configured rx_skb_size; the slot's buffer is
 * recycled in every case. Return value lines are elided.
 */
1591 static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
1593 if (likely(dev->rx_sg_skb != NULL)) {
1594 int len = dev->rx_desc[slot].data_len;
1595 int tot_len = dev->rx_sg_skb->len + len;
/* +2 accounts for the alignment offset used when mapping buffers. */
1597 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1598 ++dev->estats.rx_dropped_mtu;
1599 dev_kfree_skb(dev->rx_sg_skb);
1600 dev->rx_sg_skb = NULL;
1602 cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
1603 dev->rx_skb[slot]->data, len);
1604 skb_put(dev->rx_sg_skb, len);
1605 emac_recycle_rx_skb(dev, slot, len);
1609 emac_recycle_rx_skb(dev, slot, 0);
1613 /* NAPI poll context */
/*
 * MAL poll_rx callback: consume up to 'budget' RX descriptors from
 * dev->rx_slot. Small frames are copied into a fresh skb and the
 * original buffer recycled; larger frames get a replacement buffer
 * allocated. Multi-descriptor frames are assembled via rx_sg_skb.
 * Also restarts the RX channel if it was stopped while we drained.
 * NOTE(review): numbered listing; intervening lines (labels, gotos,
 * error paths) are elided.
 */
1614 static int emac_poll_rx(void *param, int budget)
1616 struct emac_instance *dev = param;
1617 int slot = dev->rx_slot, received = 0;
1619 DBG2(dev, "poll_rx(%d)" NL, budget);
1622 while (budget > 0) {
1624 struct sk_buff *skb;
1625 u16 ctrl = dev->rx_desc[slot].ctrl;
/* EMPTY means the hardware hasn't filled this descriptor yet. */
1627 if (ctrl & MAL_RX_CTRL_EMPTY)
1630 skb = dev->rx_skb[slot];
1632 len = dev->rx_desc[slot].data_len;
1634 if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1637 ctrl &= EMAC_BAD_RX_MASK;
/* A TAH bad-checksum indication is not fatal; anything else is. */
1638 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1639 emac_parse_rx_error(dev, ctrl);
1640 ++dev->estats.rx_dropped_error;
1641 emac_recycle_rx_skb(dev, slot, 0);
/* Copy-break: small frames are copied so the DMA buffer can be
 * recycled immediately. */
1646 if (len && len < EMAC_RX_COPY_THRESH) {
1647 struct sk_buff *copy_skb =
1648 alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
1649 if (unlikely(!copy_skb))
1652 skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
1653 cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
1655 emac_recycle_rx_skb(dev, slot, len);
1657 } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
1662 skb->dev = dev->ndev;
1663 skb->protocol = eth_type_trans(skb, dev->ndev);
1664 emac_rx_csum(dev, skb, ctrl);
1666 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1667 ++dev->estats.rx_dropped_stack;
1669 ++dev->stats.rx_packets;
1671 dev->stats.rx_bytes += len;
1672 slot = (slot + 1) % NUM_RX_BUFF;
/* Multi-descriptor (scatter-gather) frame handling. */
1677 if (ctrl & MAL_RX_CTRL_FIRST) {
1678 BUG_ON(dev->rx_sg_skb);
1679 if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
1680 DBG(dev, "rx OOM %d" NL, slot);
1681 ++dev->estats.rx_dropped_oom;
1682 emac_recycle_rx_skb(dev, slot, 0);
1684 dev->rx_sg_skb = skb;
1687 } else if (!emac_rx_sg_append(dev, slot) &&
1688 (ctrl & MAL_RX_CTRL_LAST)) {
1690 skb = dev->rx_sg_skb;
1691 dev->rx_sg_skb = NULL;
1693 ctrl &= EMAC_BAD_RX_MASK;
1694 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1695 emac_parse_rx_error(dev, ctrl);
1696 ++dev->estats.rx_dropped_error;
1704 DBG(dev, "rx OOM %d" NL, slot);
1705 /* Drop the packet and recycle skb */
1706 ++dev->estats.rx_dropped_oom;
1707 emac_recycle_rx_skb(dev, slot, 0);
1712 DBG2(dev, "rx %d BDs" NL, received);
1713 dev->rx_slot = slot;
/* If the channel was stopped (e.g. by rxde) and budget remains,
 * drop any half-assembled frame and restart reception. */
1716 if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
1718 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1719 DBG2(dev, "rx restart" NL);
1724 if (dev->rx_sg_skb) {
1725 DBG2(dev, "dropping partial rx packet" NL);
1726 ++dev->estats.rx_dropped_error;
1727 dev_kfree_skb(dev->rx_sg_skb);
1728 dev->rx_sg_skb = NULL;
1731 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1732 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1733 emac_rx_enable(dev);
1739 /* NAPI poll context */
/* Report whether the next RX descriptor holds received data
 * (i.e. is no longer EMPTY). */
1740 static int emac_peek_rx(void *param)
1742 struct emac_instance *dev = param;
1744 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1747 /* NAPI poll context */
/*
 * Scatter-gather variant of emac_peek_rx: scan forward from rx_slot
 * looking for a descriptor carrying LAST (a complete frame); stop at
 * the first EMPTY descriptor. The rx_slot equality check guards
 * against scanning the whole ring forever. Returns are elided.
 */
1748 static int emac_peek_rx_sg(void *param)
1750 struct emac_instance *dev = param;
1752 int slot = dev->rx_slot;
1754 u16 ctrl = dev->rx_desc[slot].ctrl;
1755 if (ctrl & MAL_RX_CTRL_EMPTY)
1757 else if (ctrl & MAL_RX_CTRL_LAST)
1760 slot = (slot + 1) % NUM_RX_BUFF;
1762 /* I'm just being paranoid here :) */
1763 if (unlikely(slot == dev->rx_slot))
/* MAL rxde (RX descriptor error) callback: count the stall and
 * asynchronously disable RX; emac_poll_rx() restarts it later. */
1769 static void emac_rxde(void *param)
1771 struct emac_instance *dev = param;
1773 ++dev->estats.rx_stopped;
1774 emac_rx_disable_async(dev);
/*
 * EMAC error interrupt handler: read and acknowledge ISR, then bump
 * the per-cause error counters. Data-path work is done via MAL
 * interrupts, not here. Returns IRQ_HANDLED (elided).
 */
1778 static irqreturn_t emac_irq(int irq, void *dev_instance)
1780 struct emac_instance *dev = dev_instance;
1781 struct emac_regs __iomem *p = dev->emacp;
1782 struct emac_error_stats *st = &dev->estats;
1785 spin_lock(&dev->lock);
/* Write-back of ISR acknowledges (clears) the reported events. */
1787 isr = in_be32(&p->isr);
1788 out_be32(&p->isr, isr);
1790 DBG(dev, "isr = %08x" NL, isr);
1792 if (isr & EMAC4_ISR_TXPE)
1794 if (isr & EMAC4_ISR_RXPE)
1796 if (isr & EMAC4_ISR_TXUE)
1798 if (isr & EMAC4_ISR_RXOE)
1799 ++st->rx_fifo_overrun;
1800 if (isr & EMAC_ISR_OVR)
1802 if (isr & EMAC_ISR_BP)
1803 ++st->rx_bad_packet;
1804 if (isr & EMAC_ISR_RP)
1805 ++st->rx_runt_packet;
1806 if (isr & EMAC_ISR_SE)
1807 ++st->rx_short_event;
1808 if (isr & EMAC_ISR_ALE)
1809 ++st->rx_alignment_error;
1810 if (isr & EMAC_ISR_BFCS)
1812 if (isr & EMAC_ISR_PTLE)
1813 ++st->rx_packet_too_long;
1814 if (isr & EMAC_ISR_ORE)
1815 ++st->rx_out_of_range;
1816 if (isr & EMAC_ISR_IRE)
1818 if (isr & EMAC_ISR_SQE)
1820 if (isr & EMAC_ISR_TE)
1823 spin_unlock(&dev->lock);
/*
 * ndo_get_stats: translate the driver's 64-bit internal counters
 * (dev->stats / dev->estats) into the "legacy" net_device_stats
 * structure, under dev->lock so a concurrent IRQ update cannot be
 * seen half-way. Returns nst (return line elided).
 */
1828 static struct net_device_stats *emac_stats(struct net_device *ndev)
1830 struct emac_instance *dev = netdev_priv(ndev);
1831 struct emac_stats *st = &dev->stats;
1832 struct emac_error_stats *est = &dev->estats;
1833 struct net_device_stats *nst = &dev->nstats;
1834 unsigned long flags;
1836 DBG2(dev, "stats" NL);
1838 /* Compute "legacy" statistics */
1839 spin_lock_irqsave(&dev->lock, flags);
1840 nst->rx_packets = (unsigned long)st->rx_packets;
1841 nst->rx_bytes = (unsigned long)st->rx_bytes;
1842 nst->tx_packets = (unsigned long)st->tx_packets;
1843 nst->tx_bytes = (unsigned long)st->tx_bytes;
1844 nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1845 est->rx_dropped_error +
1846 est->rx_dropped_resize +
1847 est->rx_dropped_mtu);
1848 nst->tx_dropped = (unsigned long)est->tx_dropped;
1850 nst->rx_errors = (unsigned long)est->rx_bd_errors;
1851 nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1852 est->rx_fifo_overrun +
1854 nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1855 est->rx_alignment_error);
1856 nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1858 nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1859 est->rx_bd_short_event +
1860 est->rx_bd_packet_too_long +
1861 est->rx_bd_out_of_range +
1862 est->rx_bd_in_range +
1863 est->rx_runt_packet +
1864 est->rx_short_event +
1865 est->rx_packet_too_long +
1866 est->rx_out_of_range +
1869 nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1870 nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1872 nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1873 nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1874 est->tx_bd_excessive_collisions +
1875 est->tx_bd_late_collision +
1876 est->tx_bd_multple_collisions);
1877 spin_unlock_irqrestore(&dev->lock, flags);
/* MAL callback table for the non-scatter-gather RX path. */
1881 static struct mal_commac_ops emac_commac_ops = {
1882 .poll_tx = &emac_poll_tx,
1883 .poll_rx = &emac_poll_rx,
1884 .peek_rx = &emac_peek_rx,
/* MAL callback table for the scatter-gather RX path: identical
 * except peek_rx must find a complete (LAST-flagged) frame. */
1888 static struct mal_commac_ops emac_commac_sg_ops = {
1889 .poll_tx = &emac_poll_tx,
1890 .poll_rx = &emac_poll_rx,
1891 .peek_rx = &emac_peek_rx_sg,
1895 /* Ethtool support */
/*
 * ethtool get_settings: report PHY capabilities and, under
 * link_lock, the currently negotiated/forced link parameters.
 */
1896 static int emac_ethtool_get_settings(struct net_device *ndev,
1897 struct ethtool_cmd *cmd)
1899 struct emac_instance *dev = netdev_priv(ndev);
1901 cmd->supported = dev->phy.features;
1902 cmd->port = PORT_MII;
1903 cmd->phy_address = dev->phy.address;
/* address < 0 marks the PHY-less configuration. */
1905 dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1907 mutex_lock(&dev->link_lock);
1908 cmd->advertising = dev->phy.advertising;
1909 cmd->autoneg = dev->phy.autoneg;
1910 cmd->speed = dev->phy.speed;
1911 cmd->duplex = dev->phy.duplex;
1912 mutex_unlock(&dev->link_lock);
/*
 * ethtool set_settings: validate the requested autoneg/speed/duplex
 * against PHY capabilities, then program either forced parameters or
 * a new advertising mask (under link_lock) and force a link update.
 * NOTE(review): numbered listing; error returns and case labels are
 * elided.
 */
1917 static int emac_ethtool_set_settings(struct net_device *ndev,
1918 struct ethtool_cmd *cmd)
1920 struct emac_instance *dev = netdev_priv(ndev);
1921 u32 f = dev->phy.features;
1923 DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
1924 cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1926 /* Basic sanity checks */
1927 if (dev->phy.address < 0)
1929 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1931 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1933 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
/* Forced mode: the requested speed/duplex must be supported. */
1936 if (cmd->autoneg == AUTONEG_DISABLE) {
1937 switch (cmd->speed) {
1939 if (cmd->duplex == DUPLEX_HALF
1940 && !(f & SUPPORTED_10baseT_Half))
1942 if (cmd->duplex == DUPLEX_FULL
1943 && !(f & SUPPORTED_10baseT_Full))
1947 if (cmd->duplex == DUPLEX_HALF
1948 && !(f & SUPPORTED_100baseT_Half))
1950 if (cmd->duplex == DUPLEX_FULL
1951 && !(f & SUPPORTED_100baseT_Full))
1955 if (cmd->duplex == DUPLEX_HALF
1956 && !(f & SUPPORTED_1000baseT_Half))
1958 if (cmd->duplex == DUPLEX_FULL
1959 && !(f & SUPPORTED_1000baseT_Full))
1966 mutex_lock(&dev->link_lock);
1967 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1969 mutex_unlock(&dev->link_lock);
/* Autoneg mode: restrict advertising to supported features, keep
 * the existing pause advertisement bits. */
1972 if (!(f & SUPPORTED_Autoneg))
1975 mutex_lock(&dev->link_lock);
1976 dev->phy.def->ops->setup_aneg(&dev->phy,
1977 (cmd->advertising & f) |
1978 (dev->phy.advertising &
1980 ADVERTISED_Asym_Pause)));
1981 mutex_unlock(&dev->link_lock);
1983 emac_force_link_update(dev);
/* ethtool get_ringparam: ring sizes are fixed at compile time. */
1988 static void emac_ethtool_get_ringparam(struct net_device *ndev,
1989 struct ethtool_ringparam *rp)
1991 rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
1992 rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
/*
 * ethtool get_pauseparam: report autoneg-pause capability and the
 * effective rx/tx pause state (full duplex only). Some assignment
 * lines are elided in this listing.
 */
1995 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
1996 struct ethtool_pauseparam *pp)
1998 struct emac_instance *dev = netdev_priv(ndev);
2000 mutex_lock(&dev->link_lock);
2001 if ((dev->phy.features & SUPPORTED_Autoneg) &&
2002 (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
2005 if (dev->phy.duplex == DUPLEX_FULL) {
2007 pp->rx_pause = pp->tx_pause = 1;
2008 else if (dev->phy.asym_pause)
2011 mutex_unlock(&dev->link_lock);
/* ethtool get_rx_csum: RX checksum offload exists iff a TAH device
 * is attached. */
2014 static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
2016 struct emac_instance *dev = netdev_priv(ndev);
2018 return dev->tah_dev != NULL;
/* Size of this EMAC's register dump (subheader + register block),
 * which differs between EMAC4 and older cores. */
2021 static int emac_get_regs_len(struct emac_instance *dev)
2023 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
2024 return sizeof(struct emac_ethtool_regs_subhdr) +
2025 EMAC4_ETHTOOL_REGS_SIZE(dev);
2027 return sizeof(struct emac_ethtool_regs_subhdr) +
2028 EMAC_ETHTOOL_REGS_SIZE(dev);
/*
 * ethtool get_regs_len: total dump size = header + EMAC + MAL plus
 * the ZMII/RGMII/TAH blocks present on this instance. Return line
 * is elided in this listing.
 */
2031 static int emac_ethtool_get_regs_len(struct net_device *ndev)
2033 struct emac_instance *dev = netdev_priv(ndev);
2036 size = sizeof(struct emac_ethtool_regs_hdr) +
2037 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2038 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2039 size += zmii_get_regs_len(dev->zmii_dev);
2040 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2041 size += rgmii_get_regs_len(dev->rgmii_dev);
2042 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2043 size += tah_get_regs_len(dev->tah_dev);
/*
 * Write this EMAC's register dump (subheader followed by a raw
 * register copy) into buf; returns the position just past it so the
 * caller can append the next component.
 */
2048 static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2050 struct emac_ethtool_regs_subhdr *hdr = buf;
2052 hdr->index = dev->cell_index;
2053 if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2054 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2055 memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
2056 return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev));
2058 hdr->version = EMAC_ETHTOOL_REGS_VER;
2059 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
2060 return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev));
/*
 * ethtool get_regs: assemble the full dump — file header, MAL regs,
 * EMAC regs, then each optional component (ZMII/RGMII/TAH), flagging
 * each present component in hdr->components.
 */
2064 static void emac_ethtool_get_regs(struct net_device *ndev,
2065 struct ethtool_regs *regs, void *buf)
2067 struct emac_instance *dev = netdev_priv(ndev);
2068 struct emac_ethtool_regs_hdr *hdr = buf;
2070 hdr->components = 0;
2073 buf = mal_dump_regs(dev->mal, buf);
2074 buf = emac_dump_regs(dev, buf);
2075 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2076 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2077 buf = zmii_dump_regs(dev->zmii_dev, buf);
2079 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2080 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2081 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2083 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2084 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2085 buf = tah_dump_regs(dev->tah_dev, buf);
/*
 * ethtool nway_reset: restart autonegotiation. Fails when there is
 * no PHY (address < 0) or (elided branch) autoneg is disabled; on
 * success re-issues setup_aneg and forces a link re-check.
 */
2089 static int emac_ethtool_nway_reset(struct net_device *ndev)
2091 struct emac_instance *dev = netdev_priv(ndev);
2094 DBG(dev, "nway_reset" NL);
2096 if (dev->phy.address < 0)
2099 mutex_lock(&dev->link_lock);
2100 if (!dev->phy.autoneg) {
2105 dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2107 mutex_unlock(&dev->link_lock);
2108 emac_force_link_update(dev);
/* ethtool stats count: fixed number of exported statistics. */
2112 static int emac_ethtool_get_stats_count(struct net_device *ndev)
2114 return EMAC_ETHTOOL_STATS_COUNT;
/* ethtool get_strings: copy out the statistics key names. */
2117 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2120 if (stringset == ETH_SS_STATS)
2121 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
/* ethtool get_ethtool_stats: export dev->stats followed by
 * dev->estats as a flat u64 array. */
2124 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2125 struct ethtool_stats *estats,
2128 struct emac_instance *dev = netdev_priv(ndev);
2130 memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2131 tmp_stats += sizeof(dev->stats) / sizeof(u64);
2132 memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
/* ethtool get_drvinfo: driver name/version plus a bus string built
 * from the cell index and the device-tree node path. */
2135 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2136 struct ethtool_drvinfo *info)
2138 struct emac_instance *dev = netdev_priv(ndev);
2140 strcpy(info->driver, "ibm_emac");
2141 strcpy(info->version, DRV_VERSION);
2142 info->fw_version[0] = '\0';
2143 sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
2144 dev->cell_index, dev->ofdev->node->full_name);
2145 info->n_stats = emac_ethtool_get_stats_count(ndev);
2146 info->regdump_len = emac_ethtool_get_regs_len(ndev);
/* ethtool operations table for this driver. */
2149 static const struct ethtool_ops emac_ethtool_ops = {
2150 .get_settings = emac_ethtool_get_settings,
2151 .set_settings = emac_ethtool_set_settings,
2152 .get_drvinfo = emac_ethtool_get_drvinfo,
2154 .get_regs_len = emac_ethtool_get_regs_len,
2155 .get_regs = emac_ethtool_get_regs,
2157 .nway_reset = emac_ethtool_nway_reset,
2159 .get_ringparam = emac_ethtool_get_ringparam,
2160 .get_pauseparam = emac_ethtool_get_pauseparam,
2162 .get_rx_csum = emac_ethtool_get_rx_csum,
2164 .get_strings = emac_ethtool_get_strings,
2165 .get_stats_count = emac_ethtool_get_stats_count,
2166 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2168 .get_link = ethtool_op_get_link,
2169 .get_tx_csum = ethtool_op_get_tx_csum,
2170 .get_sg = ethtool_op_get_sg,
/*
 * ndo_do_ioctl: legacy private MII ioctls — report the PHY address,
 * read a PHY register, or (CAP_NET_ADMIN only) write one. Requires
 * an attached PHY. Error returns are elided in this listing.
 */
2173 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2175 struct emac_instance *dev = netdev_priv(ndev);
2176 uint16_t *data = (uint16_t *) & rq->ifr_ifru;
2178 DBG(dev, "ioctl %08x" NL, cmd);
2180 if (dev->phy.address < 0)
2185 case SIOCDEVPRIVATE:
2186 data[0] = dev->phy.address;
2189 case SIOCDEVPRIVATE + 1:
2190 data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
2194 case SIOCDEVPRIVATE + 2:
2195 if (!capable(CAP_NET_ADMIN))
2197 emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
/* One device-tree dependency of an EMAC instance (MAL, ZMII, RGMII,
 * TAH, MDIO, or the previous EMAC in probe order), tracked while we
 * wait for its driver to bind. */
2204 struct emac_depentry {
2206 struct device_node *node;
2207 struct of_device *ofdev;
/* Fixed indices into the dependency array used by
 * emac_check_deps()/emac_wait_deps(). */
2211 #define EMAC_DEP_MAL_IDX 0
2212 #define EMAC_DEP_ZMII_IDX 1
2213 #define EMAC_DEP_RGMII_IDX 2
2214 #define EMAC_DEP_TAH_IDX 3
2215 #define EMAC_DEP_MDIO_IDX 4
2216 #define EMAC_DEP_PREV_IDX 5
2217 #define EMAC_DEP_COUNT 6
/*
 * Probe-time dependency check: for each declared dependency resolve
 * phandle -> node -> of_device -> bound driver data, step by step.
 * Returns non-zero only once all EMAC_DEP_COUNT entries are
 * satisfied ("there" counting lines are elided in this listing).
 */
2219 static int __devinit emac_check_deps(struct emac_instance *dev,
2220 struct emac_depentry *deps)
2223 struct device_node *np;
2225 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2226 /* no dependency on that item, allright */
2227 if (deps[i].phandle == 0) {
2231 /* special case for blist as the dependency might go away */
2232 if (i == EMAC_DEP_PREV_IDX) {
2233 np = *(dev->blist - 1);
2235 deps[i].phandle = 0;
2239 if (deps[i].node == NULL)
2240 deps[i].node = of_node_get(np);
2242 if (deps[i].node == NULL)
2243 deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2244 if (deps[i].node == NULL)
2246 if (deps[i].ofdev == NULL)
2247 deps[i].ofdev = of_find_device_by_node(deps[i].node);
2248 if (deps[i].ofdev == NULL)
2250 if (deps[i].drvdata == NULL)
2251 deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
2252 if (deps[i].drvdata != NULL)
2255 return (there == EMAC_DEP_COUNT);
/* Drop the of_device references taken on our dependencies
 * (elided: presumably guarded by NULL checks — confirm in full file). */
2258 static void emac_put_deps(struct emac_instance *dev)
2261 of_dev_put(dev->mal_dev);
2263 of_dev_put(dev->zmii_dev);
2265 of_dev_put(dev->rgmii_dev);
2267 of_dev_put(dev->mdio_dev);
2269 of_dev_put(dev->tah_dev);
/* Bus notifier: wake any EMAC probe waiting in emac_wait_deps()
 * whenever a driver binds on the of_platform bus. */
2272 static int __devinit emac_of_bus_notify(struct notifier_block *nb,
2273 unsigned long action, void *data)
2275 /* We are only intereted in device addition */
2276 if (action == BUS_NOTIFY_BOUND_DRIVER)
2277 wake_up_all(&emac_probe_wait);
/* Notifier block registered around the dependency wait. */
2281 static struct notifier_block emac_of_bus_notifier __devinitdata = {
2282 .notifier_call = emac_of_bus_notify
/*
 * Wait (with timeout) for all device-tree dependencies of this EMAC
 * to be probed, using a bus notifier to re-check on each driver
 * bind. On success stores the resolved of_devices in dev; on
 * failure the references are dropped. Returns 0 or -ENODEV.
 * NOTE(review): numbered listing; intervening lines are elided.
 */
2285 static int __devinit emac_wait_deps(struct emac_instance *dev)
2287 struct emac_depentry deps[EMAC_DEP_COUNT];
2290 memset(&deps, 0, sizeof(deps));
2292 deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2293 deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2294 deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2296 deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2298 deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
/* Sentinel phandle marks a dependency on the previous EMAC in the
 * boot list (resolved specially in emac_check_deps). */
2299 if (dev->blist && dev->blist > emac_boot_list)
2300 deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2301 bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2302 wait_event_timeout(emac_probe_wait,
2303 emac_check_deps(dev, deps),
2304 EMAC_PROBE_DEP_TIMEOUT);
2305 bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2306 err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2307 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2309 of_node_put(deps[i].node);
2310 if (err && deps[i].ofdev)
2311 of_dev_put(deps[i].ofdev);
2314 dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2315 dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2316 dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2317 dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2318 dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
/* The "previous EMAC" reference is only needed for ordering. */
2320 if (deps[EMAC_DEP_PREV_IDX].ofdev)
2321 of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
/*
 * Read a u32 device-tree property into *val. When 'fatal' is set a
 * missing/short property is logged as an error (return lines and
 * the success path are elided in this listing).
 */
2325 static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2326 u32 *val, int fatal)
2329 const u32 *prop = of_get_property(np, name, &len);
2330 if (prop == NULL || len < sizeof(u32)) {
2332 printk(KERN_ERR "%s: missing %s property\n",
2333 np->full_name, name);
/*
 * Probe-time PHY initialization: handle the PHY-less configuration,
 * set up MDIO accessors, apply the 440GX/440EP clock workarounds,
 * scan the MII bus for a PHY (honouring phy-address / phy-map from
 * the device tree and the global busy_phy_map), then program initial
 * autonegotiated or forced link parameters.
 * NOTE(review): numbered listing; many lines (returns, #endif/#else,
 * assignments) are elided.
 */
2340 static int __devinit emac_init_phy(struct emac_instance *dev)
2342 struct device_node *np = dev->ofdev->node;
2343 struct net_device *ndev = dev->ndev;
2347 dev->phy.dev = ndev;
2348 dev->phy.mode = dev->phy_mode;
2350 /* PHY-less configuration.
2351 * XXX I probably should move these settings to the dev tree
2353 if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
2356 /* PHY-less configuration.
2357 * XXX I probably should move these settings to the dev tree
2359 dev->phy.address = -1;
2360 dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
/* Serialize PHY scanning against other EMAC instances. */
2366 mutex_lock(&emac_phy_map_lock);
2367 phy_map = dev->phy_map | busy_phy_map;
2369 DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2371 dev->phy.mdio_read = emac_mdio_read;
2372 dev->phy.mdio_write = emac_mdio_write;
2374 /* Enable internal clock source */
2375 #ifdef CONFIG_PPC_DCR_NATIVE
2376 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2377 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2379 /* PHY clock workaround */
2380 emac_rx_clk_tx(dev);
2382 /* Enable internal clock source on 440GX*/
2383 #ifdef CONFIG_PPC_DCR_NATIVE
2384 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2385 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2387 /* Configure EMAC with defaults so we can at least use MDIO
2388 * This is needed mostly for 440GX
2390 if (emac_phy_gpcs(dev->phy.mode)) {
2392 * Make GPCS PHY address equal to EMAC index.
2393 * We probably should take into account busy_phy_map
2394 * and/or phy_map here.
2396 * Note that the busy_phy_map is currently global
2397 * while it should probably be per-ASIC...
2399 dev->phy.address = dev->cell_index;
2402 emac_configure(dev);
/* An explicit phy-address restricts the scan to that address. */
2404 if (dev->phy_address != 0xffffffff)
2405 phy_map = ~(1 << dev->phy_address);
2407 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2408 if (!(phy_map & 1)) {
2410 busy_phy_map |= 1 << i;
2412 /* Quick check if there is a PHY at the address */
2413 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2414 if (r == 0xffff || r < 0)
2416 if (!emac_mii_phy_probe(&dev->phy, i))
2420 /* Enable external clock source */
2421 #ifdef CONFIG_PPC_DCR_NATIVE
2422 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2423 dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2425 mutex_unlock(&emac_phy_map_lock);
2427 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
2432 if (dev->phy.def->ops->init)
2433 dev->phy.def->ops->init(&dev->phy);
2435 /* Disable any PHY features not supported by the platform */
2436 dev->phy.def->features &= ~dev->phy_feat_exc;
2438 /* Setup initial link parameters */
2439 if (dev->phy.features & SUPPORTED_Autoneg) {
2440 adv = dev->phy.features;
2441 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2442 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2443 /* Restart autonegotiation */
2444 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2446 u32 f = dev->phy.def->features;
2447 int speed = SPEED_10, fd = DUPLEX_HALF;
2449 /* Select highest supported speed/duplex */
2450 if (f & SUPPORTED_1000baseT_Full) {
2453 } else if (f & SUPPORTED_1000baseT_Half)
2455 else if (f & SUPPORTED_100baseT_Full) {
2458 } else if (f & SUPPORTED_100baseT_Half)
2460 else if (f & SUPPORTED_10baseT_Full)
2463 /* Force link parameters */
2464 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2469 static int __devinit emac_init_config(struct emac_instance *dev)
2471 struct device_node *np = dev->ofdev->node;
2474 const char *pm, *phy_modes[] = {
2476 [PHY_MODE_MII] = "mii",
2477 [PHY_MODE_RMII] = "rmii",
2478 [PHY_MODE_SMII] = "smii",
2479 [PHY_MODE_RGMII] = "rgmii",
2480 [PHY_MODE_TBI] = "tbi",
2481 [PHY_MODE_GMII] = "gmii",
2482 [PHY_MODE_RTBI] = "rtbi",
2483 [PHY_MODE_SGMII] = "sgmii",
2486 /* Read config from device-tree */
2487 if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2489 if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2491 if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2493 if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
/*
 * Device-tree configuration parsing (tail of emac_init_config()).
 * Each emac_read_uint_prop() call reads a u32 property from the EMAC
 * node; when the property is absent the driver falls back to a default
 * (last argument 0 = optional, 1 = mandatory — presumably the mandatory
 * case returns an error, the return statement is not visible here).
 */
2495 if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2496 dev->max_mtu = 1500;
2497 if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2498 dev->rx_fifo_size = 2048;
2499 if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2500 dev->tx_fifo_size = 2048;
/* Gige FIFO sizes default to the 10/100 values when not given */
2501 if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2502 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2503 if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2504 dev->tx_fifo_size_gige = dev->tx_fifo_size;
/* 0xffffffff is the "not configured" sentinel for PHY address/map */
2505 if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2506 dev->phy_address = 0xffffffff;
2507 if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2508 dev->phy_map = 0xffffffff;
/* OPB bus frequency comes from the parent node and is mandatory */
2509 if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
/* Optional phandles/channels for TAH/MDIO/ZMII/RGMII companion cells */
2511 if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2513 if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2515 if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2517 if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2519 if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
/* NOTE(review): stray double semicolon below — harmless, should be ";" */
2520 dev->zmii_port = 0xffffffff;;
2521 if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2523 if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
/* NOTE(review): stray double semicolon below — harmless, should be ";" */
2524 dev->rgmii_port = 0xffffffff;;
2525 if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2526 dev->fifo_entry_size = 16;
2527 if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2528 dev->mal_burst_size = 256;
2530 /* PHY mode needs some decoding */
2531 dev->phy_mode = PHY_MODE_NA;
2532 pm = of_get_property(np, "phy-mode", &plen);
/* Match the "phy-mode" string against the known phy_modes[] table */
2535 for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
2536 if (!strcasecmp(pm, phy_modes[i])) {
2542 /* Backward compat with non-final DT */
2543 if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
/* Old device trees encoded the PHY mode as a raw 32-bit integer */
2544 u32 nmode = *(const u32 *)pm;
2545 if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
2546 dev->phy_mode = nmode;
2549 /* Check EMAC version */
2550 if (of_device_is_compatible(np, "ibm,emac4sync")) {
2551 dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
2552 } else if (of_device_is_compatible(np, "ibm,emac4")) {
2553 dev->features |= EMAC_FTR_EMAC4;
/* 440GX additionally needs the PHY clock workaround */
2554 if (of_device_is_compatible(np, "ibm,emac-440gx"))
2555 dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2557 if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2558 of_device_is_compatible(np, "ibm,emac-440gr"))
2559 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2562 /* Fixup some feature bits based on the device tree */
2563 if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2564 dev->features |= EMAC_FTR_STACR_OC_INVERT;
2565 if (of_get_property(np, "has-new-stacr-staopc", NULL))
2566 dev->features |= EMAC_FTR_HAS_NEW_STACR;
2568 /* CAB lacks the appropriate properties */
2569 if (of_device_is_compatible(np, "ibm,emac-axon"))
2570 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2571 EMAC_FTR_STACR_OC_INVERT;
2573 /* Enable TAH/ZMII/RGMII features as found */
/* A non-zero phandle means the DT wired this EMAC to the companion
 * cell; the feature is only usable if the corresponding driver was
 * built in, otherwise an error is logged. */
2574 if (dev->tah_ph != 0) {
2575 #ifdef CONFIG_IBM_NEW_EMAC_TAH
2576 dev->features |= EMAC_FTR_HAS_TAH;
2578 printk(KERN_ERR "%s: TAH support not enabled !\n",
2584 if (dev->zmii_ph != 0) {
2585 #ifdef CONFIG_IBM_NEW_EMAC_ZMII
2586 dev->features |= EMAC_FTR_HAS_ZMII;
2588 printk(KERN_ERR "%s: ZMII support not enabled !\n",
2594 if (dev->rgmii_ph != 0) {
2595 #ifdef CONFIG_IBM_NEW_EMAC_RGMII
2596 dev->features |= EMAC_FTR_HAS_RGMII;
2598 printk(KERN_ERR "%s: RGMII support not enabled !\n",
2604 /* Read MAC-address */
2605 p = of_get_property(np, "local-mac-address", NULL);
2607 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
/* Copy the 6-byte ethernet address out of the DT property blob */
2611 memcpy(dev->ndev->dev_addr, p, 6);
2613 /* IAHT and GAHT filter parameterization */
/* EMAC4SYNC uses different hash-table geometry than plain EMAC4 */
2614 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2615 dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
2616 dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
2618 dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
2619 dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
/* Dump the final configuration when debugging is enabled */
2622 DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2623 DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2624 DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2625 DBG(dev, "max_mtu : %d\n", dev->max_mtu);
2626 DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
/*
 * emac_probe - of_platform probe routine for one EMAC instance.
 *
 * Allocates the net_device, parses device-tree configuration, maps the
 * IRQs and register window, waits for dependent MAL/ZMII/RGMII/TAH/MDIO
 * devices, registers with the MAL, attaches the bridge cells, sets link
 * defaults, fills in the net_device ops and registers the netdev.
 * Error handling uses the classic goto-unwind ladder (labels mostly not
 * visible in this excerpt).
 */
2631 static int __devinit emac_probe(struct of_device *ofdev,
2632 const struct of_device_id *match)
2634 struct net_device *ndev;
2635 struct emac_instance *dev;
2636 struct device_node *np = ofdev->node;
2637 struct device_node **blist = NULL;
2640 /* Skip unused/unwired EMACS. We leave the check for an unused
2641 * property here for now, but new flat device trees should set a
2642 * status property to "disabled" instead.
2644 if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2647 /* Find ourselves in the bootlist if we are there */
2648 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2649 if (emac_boot_list[i] == np)
2650 blist = &emac_boot_list[i];
2652 /* Allocate our net_device structure */
2654 ndev = alloc_etherdev(sizeof(struct emac_instance));
2656 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2660 dev = netdev_priv(ndev);
2664 SET_NETDEV_DEV(ndev, &ofdev->dev);
2666 /* Initialize some embedded data structures */
2667 mutex_init(&dev->mdio_lock);
2668 mutex_init(&dev->link_lock);
2669 spin_lock_init(&dev->lock);
2670 INIT_WORK(&dev->reset_work, emac_reset_work);
2672 /* Init various config data based on device-tree */
2673 err = emac_init_config(dev);
2677 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2678 dev->emac_irq = irq_of_parse_and_map(np, 0);
2679 dev->wol_irq = irq_of_parse_and_map(np, 1);
2680 if (dev->emac_irq == NO_IRQ) {
2681 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2684 ndev->irq = dev->emac_irq;
/* Map the EMAC register block described by the DT "reg" property */
2687 if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2688 printk(KERN_ERR "%s: Can't get registers address\n",
2692 // TODO : request_mem_region
2693 dev->emacp = ioremap(dev->rsrc_regs.start,
2694 dev->rsrc_regs.end - dev->rsrc_regs.start + 1);
2695 if (dev->emacp == NULL) {
2696 printk(KERN_ERR "%s: Can't map device registers!\n",
2702 /* Wait for dependent devices */
2703 err = emac_wait_deps(dev);
2706 "%s: Timeout waiting for dependent devices\n",
2708 /* display more info about what's missing ? */
2711 dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2712 if (dev->mdio_dev != NULL)
2713 dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2715 /* Register with MAL */
2716 dev->commac.ops = &emac_commac_ops;
2717 dev->commac.dev = dev;
2718 dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2719 dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2720 err = mal_register_commac(dev->mal, &dev->commac);
2722 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2723 np->full_name, dev->mal_dev->node->full_name);
/* Size RX skbs for the current MTU */
2726 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2727 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2729 /* Get pointers to BD rings */
2731 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2733 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2735 DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2736 DBG(dev, "rx_desc %p" NL, dev->rx_desc);
/* Clear the descriptor rings before use */
2739 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2740 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2742 /* Attach to ZMII, if needed */
2743 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2744 (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2745 goto err_unreg_commac;
2747 /* Attach to RGMII, if needed */
2748 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2749 (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2750 goto err_detach_zmii;
2752 /* Attach to TAH, if needed */
2753 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2754 (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2755 goto err_detach_rgmii;
2757 /* Set some link defaults before we can find out real parameters */
2758 dev->phy.speed = SPEED_100;
2759 dev->phy.duplex = DUPLEX_FULL;
2760 dev->phy.autoneg = AUTONEG_DISABLE;
2761 dev->phy.pause = dev->phy.asym_pause = 0;
2762 dev->stop_timeout = STOP_TIMEOUT_100;
2763 INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2765 /* Find PHY if any */
2766 err = emac_init_phy(dev);
2768 goto err_detach_tah;
2770 /* Fill in the driver function table */
2771 ndev->open = &emac_open;
2773 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2774 ndev->tx_timeout = &emac_tx_timeout;
2775 ndev->watchdog_timeo = 5 * HZ;
2776 ndev->stop = &emac_close;
2777 ndev->get_stats = &emac_stats;
2778 ndev->set_multicast_list = &emac_set_multicast_list;
2779 ndev->do_ioctl = &emac_ioctl;
/* Gige-capable PHYs get the scatter/gather transmit path and MTU changes */
2780 if (emac_phy_supports_gige(dev->phy_mode)) {
2781 ndev->hard_start_xmit = &emac_start_xmit_sg;
2782 ndev->change_mtu = &emac_change_mtu;
2783 dev->commac.ops = &emac_commac_sg_ops;
2785 ndev->hard_start_xmit = &emac_start_xmit;
2787 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
/* Keep the interface quiescent until the link comes up */
2789 netif_carrier_off(ndev);
2790 netif_stop_queue(ndev);
2792 err = register_netdev(ndev);
2794 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2795 np->full_name, err);
2796 goto err_detach_tah;
2799 /* Set our drvdata last as we don't want them visible until we are
2803 dev_set_drvdata(&ofdev->dev, dev);
2805 /* There's a new kid in town ! Let's tell everybody */
2806 wake_up_all(&emac_probe_wait);
2810 "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2811 ndev->name, dev->cell_index, np->full_name,
2812 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2813 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2815 if (dev->phy.address >= 0)
2816 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2817 dev->phy.def->name, dev->phy.address);
2819 emac_dbg_register(dev);
/* --- error unwind ladder: detach in reverse order of attachment --- */
2824 /* I have a bad feeling about this ... */
2827 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2828 tah_detach(dev->tah_dev, dev->tah_port);
2830 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2831 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2833 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2834 zmii_detach(dev->zmii_dev, dev->zmii_port);
2836 mal_unregister_commac(dev->mal, &dev->commac);
2840 iounmap(dev->emacp);
2842 if (dev->wol_irq != NO_IRQ)
2843 irq_dispose_mapping(dev->wol_irq);
2844 if (dev->emac_irq != NO_IRQ)
2845 irq_dispose_mapping(dev->emac_irq);
2849 /* if we were on the bootlist, remove us as we won't show up and
2850 * wake up all waiters to notify them in case they were waiting
2855 wake_up_all(&emac_probe_wait);
/*
 * emac_remove - of_platform remove routine; tears down everything
 * emac_probe() set up, in reverse order: unregister the netdev, flush
 * pending work, detach TAH/RGMII/ZMII, unregister from the MAL, unmap
 * registers and dispose of the IRQ mappings.
 */
2860 static int __devexit emac_remove(struct of_device *ofdev)
2862 struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2864 DBG(dev, "remove" NL);
/* Hide ourselves from new lookups before tearing down */
2866 dev_set_drvdata(&ofdev->dev, NULL);
2868 unregister_netdev(dev->ndev);
/* Make sure reset_work / link_work are not still running */
2870 flush_scheduled_work();
2872 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2873 tah_detach(dev->tah_dev, dev->tah_port);
2874 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2875 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2876 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2877 zmii_detach(dev->zmii_dev, dev->zmii_port);
2879 mal_unregister_commac(dev->mal, &dev->commac);
2882 emac_dbg_unregister(dev);
2883 iounmap(dev->emacp);
2885 if (dev->wol_irq != NO_IRQ)
2886 irq_dispose_mapping(dev->wol_irq)
2887 if (dev->emac_irq != NO_IRQ)
2888 irq_dispose_mapping(dev->emac_irq);
/* Device-tree match table: the three compatible strings this driver
 * binds to (plain EMAC, EMAC4 and EMAC4SYNC variants). */
2895 /* XXX Features in here should be replaced by properties... */
2896 static struct of_device_id emac_match[] =
2900 .compatible = "ibm,emac",
2904 .compatible = "ibm,emac4",
2908 .compatible = "ibm,emac4sync",
/* of_platform driver glue binding emac_match to probe/remove above */
2913 static struct of_platform_driver emac_driver = {
2915 .match_table = emac_match,
2917 .probe = emac_probe,
2918 .remove = emac_remove,
/*
 * emac_make_bootlist - scan the whole device tree for EMAC nodes
 * matching emac_match, skipping "unused" ones, and record them in
 * emac_boot_list[] ordered by their "cell-index" property so probe
 * ordering is deterministic.
 */
2921 static void __init emac_make_bootlist(void)
2923 struct device_node *np = NULL;
2924 int j, max, i = 0, k;
2925 int cell_indices[EMAC_BOOT_LIST_SIZE];
2928 while((np = of_find_all_nodes(np)) != NULL) {
2931 if (of_match_node(emac_match, np) == NULL)
2933 if (of_get_property(np, "unused", NULL))
2935 idx = of_get_property(np, "cell-index", NULL);
/* Remember the cell index alongside the node reference */
2938 cell_indices[i] = *idx;
2939 emac_boot_list[i++] = of_node_get(np);
2940 if (i >= EMAC_BOOT_LIST_SIZE) {
2947 /* Bubble sort them (doh, what a creative algorithm :-) */
2948 for (i = 0; max > 1 && (i < (max - 1)); i++)
2949 for (j = i; j < max; j++) {
2950 if (cell_indices[i] > cell_indices[j]) {
/* Swap both the node pointers and their cell indices */
2951 np = emac_boot_list[i];
2952 emac_boot_list[i] = emac_boot_list[j];
2953 emac_boot_list[j] = np;
2954 k = cell_indices[i];
2955 cell_indices[i] = cell_indices[j];
2956 cell_indices[j] = k;
/*
 * emac_init - module entry point: announce the driver, build the boot
 * list, initialize submodules and register the of_platform driver.
 */
2961 static int __init emac_init(void)
2965 printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
2967 /* Init debug stuff */
2970 /* Build EMAC boot list */
2971 emac_make_bootlist();
2973 /* Init submodules */
2986 rc = of_register_platform_driver(&emac_driver);
/*
 * emac_exit - module exit point: unregister the driver and drop the
 * node references taken by emac_make_bootlist().
 */
3004 static void __exit emac_exit(void)
3008 of_unregister_platform_driver(&emac_driver);
3016 /* Destroy EMAC boot list */
3017 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3018 if (emac_boot_list[i])
3019 of_node_put(emac_boot_list[i]);
3022 module_init(emac_init);
3023 module_exit(emac_exit);