2 * drivers/net/ibm_newemac/core.c
4 * Driver for PowerPC 4xx on-chip ethernet controller.
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
9 * Based on the arch/ppc version of the driver:
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
14 * Based on original work by
15 * Matt Porter <mporter@kernel.crashing.org>
16 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17 * Armin Kuster <akuster@mvista.com>
18 * Johnnie Peters <jpeters@mvista.com>
20 * This program is free software; you can redistribute it and/or modify it
21 * under the terms of the GNU General Public License as published by the
22 * Free Software Foundation; either version 2 of the License, or (at your
23 * option) any later version.
27 #include <linux/sched.h>
28 #include <linux/string.h>
29 #include <linux/errno.h>
30 #include <linux/delay.h>
31 #include <linux/types.h>
32 #include <linux/pci.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/crc32.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/bitops.h>
39 #include <linux/workqueue.h>
42 #include <asm/processor.h>
45 #include <asm/uaccess.h>
47 #include <asm/dcr-regs.h>
/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * the EMAC design (e.g. a TX buffer passed in from the network stack can be
 * split into several BDs, and dma_map_single/dma_map_page can be used to map a
 * particular BD), maintaining such information would add extra overhead.
 * The current DMA API implementation for 4xx processors only ensures cache
 * coherency, and the dma_unmap_???? routines are empty and are likely to stay
 * this way. I decided to omit the dma_unmap_???? calls because I don't want to
 * add additional complexity just for the sake of following some abstract API,
 * when it doesn't add any real benefit to the driver. I understand that this
 * decision may be controversial, but I really tried to make the code
 * API-correct and efficient at the same time and didn't come up with code I
 * liked :(. --ebs
 */
68 #define DRV_NAME "emac"
69 #define DRV_VERSION "3.54"
70 #define DRV_DESC "PPC 4xx OCP EMAC driver"
72 MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
75 MODULE_LICENSE("GPL");
/*
 * PPC64 doesn't (yet) have a cacheable_memcpy
 */
#ifdef CONFIG_PPC64
#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
#endif
84 /* minimum number of free TX descriptors required to wake up TX process */
85 #define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
/* If packet size is less than this number, we allocate a small skb and copy
 * the packet contents into it instead of pushing the original, larger skb up
 * the stack.
 */
#define EMAC_RX_COPY_THRESH CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * set up precise phy_map entries.
 *
 * XXX This is something that needs to be reworked as we can have multiple
 * EMAC "sets" (multiple ASICs containing several EMACs), though in that case
 * we could probably require explicit PHY IDs in the device-tree.
 */
100 static u32 busy_phy_map;
101 static DEFINE_MUTEX(emac_phy_map_lock);
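/* Illustrative sketch (not compiled): one way a probe path could claim a free
 * PHY address under emac_phy_map_lock, assuming the convention that bit N set
 * in either mask means "address N must not be used". emac_claim_phy_address()
 * is a hypothetical helper; the real selection logic lives in emac_init_phy()
 * further down.
 */
#if 0
static int emac_claim_phy_address(u32 skip_map)
{
	int i;

	mutex_lock(&emac_phy_map_lock);
	for (i = 0; i < 0x20; i++) {
		if ((skip_map | busy_phy_map) & (1 << i))
			continue;
		busy_phy_map |= 1 << i;	/* mark it busy for everyone else */
		mutex_unlock(&emac_phy_map_lock);
		return i;
	}
	mutex_unlock(&emac_phy_map_lock);
	return -1;			/* no usable address left */
}
#endif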
103 /* This is the wait queue used to wait on any event related to probe, that
104 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
106 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
/* Having stable interface names is a doomed idea. However, it would be nice
 * if we didn't have completely random interface names at boot too :-) It's
 * just a matter of making everybody's life easier. Since we are doing
 * threaded probing, it's a bit harder though. The base idea here is that
 * we make up a list of all emacs in the device-tree before we register the
 * driver. Every emac will then wait for the previous one in the list to
 * initialize before itself. We should also keep that list ordered by
 * cell index.
 *
 * That list is only 4 entries long, meaning that additional EMACs don't
 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
 */
120 #define EMAC_BOOT_LIST_SIZE 4
121 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
123 /* How long should I wait for dependent devices ? */
124 #define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
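/* Minimal sketch (not compiled) of the dependency wait described above: a
 * probe sleeps on emac_probe_wait until some condition (e.g. the previous
 * EMAC in emac_boot_list having registered) becomes true or the timeout
 * expires. emac_prev_ready() is a hypothetical predicate used only for
 * illustration; the real code uses emac_check_deps() further down.
 */
#if 0
	if (!wait_event_timeout(emac_probe_wait, emac_prev_ready(dev),
				EMAC_PROBE_DEP_TIMEOUT))
		return -ENODEV;	/* dependency never showed up */
#endif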
/* I don't want to litter the system log with timeout errors
 * when we have a brain-damaged PHY.
 */
129 static inline void emac_report_timeout_error(struct emac_instance *dev,
132 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
133 EMAC_FTR_460EX_PHY_CLK_FIX |
134 EMAC_FTR_440EP_PHY_CLK_FIX))
135 DBG(dev, "%s" NL, error);
136 else if (net_ratelimit())
137 printk(KERN_ERR "%s: %s\n", dev->ofdev->node->full_name, error);
/* EMAC PHY clock workaround:
 * 440EP/440GR has a more sane SDR0_MFR register implementation than 440GX,
 * which allows each EMAC's clock to be controlled individually.
 */
144 static inline void emac_rx_clk_tx(struct emac_instance *dev)
146 #ifdef CONFIG_PPC_DCR_NATIVE
147 if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
148 dcri_clrset(SDR0, SDR0_MFR,
149 0, SDR0_MFR_ECS >> dev->cell_index);
153 static inline void emac_rx_clk_default(struct emac_instance *dev)
155 #ifdef CONFIG_PPC_DCR_NATIVE
156 if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
157 dcri_clrset(SDR0, SDR0_MFR,
158 SDR0_MFR_ECS >> dev->cell_index, 0);
162 /* PHY polling intervals */
163 #define PHY_POLL_LINK_ON HZ
164 #define PHY_POLL_LINK_OFF (HZ / 5)
/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions).
 */
169 #define STOP_TIMEOUT_10 1230
170 #define STOP_TIMEOUT_100 124
171 #define STOP_TIMEOUT_1000 13
172 #define STOP_TIMEOUT_1000_JUMBO 73
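/* Rough sanity check of the values above (a sketch, not authoritative): one
 * maximum-length frame plus preamble and inter-frame gap is about
 * (1518 + 8 + 12) * 8 = 12304 bits, i.e. ~1230 us at 10 Mb/s, ~123 us at
 * 100 Mb/s (rounded up to 124) and ~12.3 us at 1 Gb/s (rounded up to 13).
 * The jumbo value similarly corresponds to roughly a 9000-byte frame at
 * 1 Gb/s (~73 us).
 */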
174 static unsigned char default_mcast_addr[] = {
175 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
/* Please keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
179 static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
180 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
181 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
182 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
183 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
184 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
185 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
186 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
187 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
188 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
189 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
190 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
191 "tx_bd_excessive_collisions", "tx_bd_late_collision",
192 "tx_bd_multple_collisions", "tx_bd_single_collision",
193 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
197 static irqreturn_t emac_irq(int irq, void *dev_instance);
198 static void emac_clean_tx_ring(struct emac_instance *dev);
199 static void __emac_set_multicast_list(struct emac_instance *dev);
201 static inline int emac_phy_supports_gige(int phy_mode)
203 return phy_mode == PHY_MODE_GMII ||
204 phy_mode == PHY_MODE_RGMII ||
205 phy_mode == PHY_MODE_SGMII ||
206 phy_mode == PHY_MODE_TBI ||
207 phy_mode == PHY_MODE_RTBI;
210 static inline int emac_phy_gpcs(int phy_mode)
212 return phy_mode == PHY_MODE_SGMII ||
213 phy_mode == PHY_MODE_TBI ||
214 phy_mode == PHY_MODE_RTBI;
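/* SGMII/TBI/RTBI links go through the internal GPCS block, which this driver
 * manages like an extra PHY at its own MDIO address (dev->phy.gpcs_address);
 * see the GPCS handling in emac_configure() below.
 */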
217 static inline void emac_tx_enable(struct emac_instance *dev)
219 struct emac_regs __iomem *p = dev->emacp;
222 DBG(dev, "tx_enable" NL);
224 r = in_be32(&p->mr0);
225 if (!(r & EMAC_MR0_TXE))
226 out_be32(&p->mr0, r | EMAC_MR0_TXE);
229 static void emac_tx_disable(struct emac_instance *dev)
231 struct emac_regs __iomem *p = dev->emacp;
234 DBG(dev, "tx_disable" NL);
236 r = in_be32(&p->mr0);
237 if (r & EMAC_MR0_TXE) {
238 int n = dev->stop_timeout;
239 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
240 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
245 emac_report_timeout_error(dev, "TX disable timeout");
249 static void emac_rx_enable(struct emac_instance *dev)
251 struct emac_regs __iomem *p = dev->emacp;
254 if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
257 DBG(dev, "rx_enable" NL);
259 r = in_be32(&p->mr0);
260 if (!(r & EMAC_MR0_RXE)) {
261 if (unlikely(!(r & EMAC_MR0_RXI))) {
262 /* Wait if previous async disable is still in progress */
263 int n = dev->stop_timeout;
264 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
269 emac_report_timeout_error(dev,
270 "RX disable timeout");
272 out_be32(&p->mr0, r | EMAC_MR0_RXE);
278 static void emac_rx_disable(struct emac_instance *dev)
280 struct emac_regs __iomem *p = dev->emacp;
283 DBG(dev, "rx_disable" NL);
285 r = in_be32(&p->mr0);
286 if (r & EMAC_MR0_RXE) {
287 int n = dev->stop_timeout;
288 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
289 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
294 emac_report_timeout_error(dev, "RX disable timeout");
298 static inline void emac_netif_stop(struct emac_instance *dev)
300 netif_tx_lock_bh(dev->ndev);
301 netif_addr_lock(dev->ndev);
303 netif_addr_unlock(dev->ndev);
304 netif_tx_unlock_bh(dev->ndev);
305 dev->ndev->trans_start = jiffies; /* prevent tx timeout */
306 mal_poll_disable(dev->mal, &dev->commac);
307 netif_tx_disable(dev->ndev);
310 static inline void emac_netif_start(struct emac_instance *dev)
312 netif_tx_lock_bh(dev->ndev);
313 netif_addr_lock(dev->ndev);
315 if (dev->mcast_pending && netif_running(dev->ndev))
316 __emac_set_multicast_list(dev);
317 netif_addr_unlock(dev->ndev);
318 netif_tx_unlock_bh(dev->ndev);
320 netif_wake_queue(dev->ndev);
322 /* NOTE: unconditional netif_wake_queue is only appropriate
323 * so long as all callers are assured to have free tx slots
324 * (taken from tg3... though the case where that is wrong is
325 * not terribly harmful)
327 mal_poll_enable(dev->mal, &dev->commac);
330 static inline void emac_rx_disable_async(struct emac_instance *dev)
332 struct emac_regs __iomem *p = dev->emacp;
335 DBG(dev, "rx_disable_async" NL);
337 r = in_be32(&p->mr0);
338 if (r & EMAC_MR0_RXE)
339 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
342 static int emac_reset(struct emac_instance *dev)
344 struct emac_regs __iomem *p = dev->emacp;
347 DBG(dev, "reset" NL);
349 if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
353 emac_rx_disable(dev);
354 emac_tx_disable(dev);
357 #ifdef CONFIG_PPC_DCR_NATIVE
358 /* Enable internal clock source */
359 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
360 dcri_clrset(SDR0, SDR0_ETH_CFG,
361 0, SDR0_ETH_CFG_ECS << dev->cell_index);
364 out_be32(&p->mr0, EMAC_MR0_SRST);
365 while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
368 #ifdef CONFIG_PPC_DCR_NATIVE
369 /* Enable external clock source */
370 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
371 dcri_clrset(SDR0, SDR0_ETH_CFG,
372 SDR0_ETH_CFG_ECS << dev->cell_index, 0);
376 dev->reset_failed = 0;
379 emac_report_timeout_error(dev, "reset timeout");
380 dev->reset_failed = 1;
385 static void emac_hash_mc(struct emac_instance *dev)
387 const int regs = EMAC_XAHT_REGS(dev);
388 u32 *gaht_base = emac_gaht_base(dev);
390 struct dev_mc_list *dmi;
393 DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);
395 memset(gaht_temp, 0, sizeof (gaht_temp));
397 for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
399 DBG2(dev, "mc %pM" NL, dmi->dmi_addr);
401 slot = EMAC_XAHT_CRC_TO_SLOT(dev, ether_crc(ETH_ALEN, dmi->dmi_addr));
402 reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
403 mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
405 gaht_temp[reg] |= mask;
408 for (i = 0; i < regs; i++)
409 out_be32(gaht_base + i, gaht_temp[i]);
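/* For example, on a part with four 32-bit GAHT registers there are 128 hash
 * slots: EMAC_XAHT_CRC_TO_SLOT() reduces the CRC of the address to a slot
 * number, which is then split into a register index and a bit mask as above.
 * Different addresses can collide on the same bit, so this hash is only a
 * pre-filter for multicast reception.
 */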
412 static inline u32 emac_iff2rmr(struct net_device *ndev)
414 struct emac_instance *dev = netdev_priv(ndev);
417 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
419 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
424 if (ndev->flags & IFF_PROMISC)
426 else if (ndev->flags & IFF_ALLMULTI ||
427 (ndev->mc_count > EMAC_XAHT_SLOTS(dev)))
429 else if (ndev->mc_count > 0)
435 static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
437 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
439 DBG2(dev, "__emac_calc_base_mr1" NL);
443 ret |= EMAC_MR1_TFS_2K;
446 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
447 dev->ndev->name, tx_size);
452 ret |= EMAC_MR1_RFS_16K;
455 ret |= EMAC_MR1_RFS_4K;
458 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
459 dev->ndev->name, rx_size);
465 static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
467 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
468 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
470 DBG2(dev, "__emac4_calc_base_mr1" NL);
474 ret |= EMAC4_MR1_TFS_16K;
477 ret |= EMAC4_MR1_TFS_4K;
480 ret |= EMAC4_MR1_TFS_2K;
483 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
484 dev->ndev->name, tx_size);
489 ret |= EMAC4_MR1_RFS_16K;
492 ret |= EMAC4_MR1_RFS_4K;
495 ret |= EMAC4_MR1_RFS_2K;
498 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
499 dev->ndev->name, rx_size);
505 static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
507 return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
508 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
509 __emac_calc_base_mr1(dev, tx_size, rx_size);
512 static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
514 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
515 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
517 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
520 static inline u32 emac_calc_rwmr(struct emac_instance *dev,
521 unsigned int low, unsigned int high)
523 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
524 return (low << 22) | ( (high & 0x3ff) << 6);
526 return (low << 23) | ( (high & 0x1ff) << 7);
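/* Worked example (assumed numbers, for illustration only): with a 4096-byte
 * TX FIFO, emac_calc_trtr(dev, 4096 / 2) encodes a 2048-byte threshold as
 * (2048 >> 6) - 1 = 31, i.e. the threshold is expressed in 64-byte units
 * minus one. Likewise, assuming a 4096-byte RX FIFO and a 16-byte FIFO entry
 * size, emac_calc_rwmr(dev, 4096 / 8 / 16, 4096 / 4 / 16) packs a low-water
 * mark of 32 entries and a high-water mark of 64 entries into the fields
 * shown above; this matches how emac_configure() calls these helpers.
 */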
529 static int emac_configure(struct emac_instance *dev)
531 struct emac_regs __iomem *p = dev->emacp;
532 struct net_device *ndev = dev->ndev;
533 int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
536 DBG(dev, "configure" NL);
539 out_be32(&p->mr1, in_be32(&p->mr1)
540 | EMAC_MR1_FDE | EMAC_MR1_ILE);
542 } else if (emac_reset(dev) < 0)
545 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
546 tah_reset(dev->tah_dev);
548 DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
549 link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);
551 /* Default fifo sizes */
552 tx_size = dev->tx_fifo_size;
553 rx_size = dev->rx_fifo_size;
555 /* No link, force loopback */
557 mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;
559 /* Check for full duplex */
560 else if (dev->phy.duplex == DUPLEX_FULL)
561 mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
563 /* Adjust fifo sizes, mr1 and timeouts based on link speed */
564 dev->stop_timeout = STOP_TIMEOUT_10;
565 switch (dev->phy.speed) {
567 if (emac_phy_gpcs(dev->phy.mode)) {
568 mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
569 (dev->phy.gpcs_address != 0xffffffff) ?
570 dev->phy.gpcs_address : dev->phy.address);
572 /* Put some arbitrary OUI, Manuf & Rev IDs so we can
573 * identify this GPCS PHY later.
575 out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
577 mr1 |= EMAC_MR1_MF_1000;
579 /* Extended fifo sizes */
580 tx_size = dev->tx_fifo_size_gige;
581 rx_size = dev->rx_fifo_size_gige;
583 if (dev->ndev->mtu > ETH_DATA_LEN) {
584 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
585 mr1 |= EMAC4_MR1_JPSM;
587 mr1 |= EMAC_MR1_JPSM;
588 dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
590 dev->stop_timeout = STOP_TIMEOUT_1000;
593 mr1 |= EMAC_MR1_MF_100;
594 dev->stop_timeout = STOP_TIMEOUT_100;
596 default: /* make gcc happy */
600 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
601 rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
603 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
604 zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);
	/* An erratum on 40x forces us NOT to use the integrated flow control;
	 * let's hope it works on 44x ;)
	 */
609 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
610 dev->phy.duplex == DUPLEX_FULL) {
612 mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
613 else if (dev->phy.asym_pause)
617 /* Add base settings & fifo sizes & program MR1 */
618 mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
619 out_be32(&p->mr1, mr1);
621 /* Set individual MAC address */
622 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
623 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
624 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
627 /* VLAN Tag Protocol ID */
628 out_be32(&p->vtpid, 0x8100);
630 /* Receive mode register */
631 r = emac_iff2rmr(ndev);
632 if (r & EMAC_RMR_MAE)
634 out_be32(&p->rmr, r);
636 /* FIFOs thresholds */
637 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
638 r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
639 tx_size / 2 / dev->fifo_entry_size);
641 r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
642 tx_size / 2 / dev->fifo_entry_size);
643 out_be32(&p->tmr1, r);
644 out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should still be enough space in the FIFO to allow our link
	   partner time to process this frame and also time to send a PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                 64 bytes
	   3) PAUSE frame decode time allowance                    64 bytes
	   4) One maximum-length frame on RX                     1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)     15 bytes
	   ----------
	   3187 bytes

	   I chose to set the high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   and the low-water mark to RX_FIFO_SIZE / 8 (512 bytes).
	 */
665 r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
666 rx_size / 4 / dev->fifo_entry_size);
667 out_be32(&p->rwmr, r);
669 /* Set PAUSE timer to the maximum */
670 out_be32(&p->ptr, 0xffff);
673 r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
674 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
675 EMAC_ISR_IRE | EMAC_ISR_TE;
676 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
677 r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
679 out_be32(&p->iser, r);
681 /* We need to take GPCS PHY out of isolate mode after EMAC reset */
682 if (emac_phy_gpcs(dev->phy.mode)) {
683 if (dev->phy.gpcs_address != 0xffffffff)
684 emac_mii_reset_gpcs(&dev->phy);
686 emac_mii_reset_phy(&dev->phy);
692 static void emac_reinitialize(struct emac_instance *dev)
694 DBG(dev, "reinitialize" NL);
696 emac_netif_stop(dev);
697 if (!emac_configure(dev)) {
701 emac_netif_start(dev);
704 static void emac_full_tx_reset(struct emac_instance *dev)
706 DBG(dev, "full_tx_reset" NL);
708 emac_tx_disable(dev);
709 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
710 emac_clean_tx_ring(dev);
711 dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
715 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
720 static void emac_reset_work(struct work_struct *work)
722 struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
724 DBG(dev, "reset_work" NL);
726 mutex_lock(&dev->link_lock);
728 emac_netif_stop(dev);
729 emac_full_tx_reset(dev);
730 emac_netif_start(dev);
732 mutex_unlock(&dev->link_lock);
735 static void emac_tx_timeout(struct net_device *ndev)
737 struct emac_instance *dev = netdev_priv(ndev);
739 DBG(dev, "tx_timeout" NL);
741 schedule_work(&dev->reset_work);
static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
{
	int done = !!(stacr & EMAC_STACR_OC);

	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		done = !done;

	return done;
}
755 static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
757 struct emac_regs __iomem *p = dev->emacp;
759 int n, err = -ETIMEDOUT;
761 mutex_lock(&dev->mdio_lock);
763 DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);
765 /* Enable proper MDIO port */
766 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
767 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
768 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
769 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
771 /* Wait for management interface to become idle */
773 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
776 DBG2(dev, " -> timeout wait idle\n");
781 /* Issue read command */
782 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
783 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
785 r = EMAC_STACR_BASE(dev->opb_bus_freq);
786 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
788 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
789 r |= EMACX_STACR_STAC_READ;
791 r |= EMAC_STACR_STAC_READ;
792 r |= (reg & EMAC_STACR_PRA_MASK)
793 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
794 out_be32(&p->stacr, r);
796 /* Wait for read to complete */
798 while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
801 DBG2(dev, " -> timeout wait complete\n");
806 if (unlikely(r & EMAC_STACR_PHYE)) {
807 DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
812 r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
814 DBG2(dev, "mdio_read -> %04x" NL, r);
817 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
818 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
819 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
820 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
821 mutex_unlock(&dev->mdio_lock);
823 return err == 0 ? r : err;
826 static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
829 struct emac_regs __iomem *p = dev->emacp;
831 int n, err = -ETIMEDOUT;
833 mutex_lock(&dev->mdio_lock);
835 DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);
837 /* Enable proper MDIO port */
838 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
839 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
840 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
841 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
843 /* Wait for management interface to be idle */
845 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
848 DBG2(dev, " -> timeout wait idle\n");
853 /* Issue write command */
854 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
855 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
857 r = EMAC_STACR_BASE(dev->opb_bus_freq);
858 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
860 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
861 r |= EMACX_STACR_STAC_WRITE;
863 r |= EMAC_STACR_STAC_WRITE;
864 r |= (reg & EMAC_STACR_PRA_MASK) |
865 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
866 (val << EMAC_STACR_PHYD_SHIFT);
867 out_be32(&p->stacr, r);
869 /* Wait for write to complete */
871 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
874 DBG2(dev, " -> timeout wait complete\n");
880 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
881 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
882 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
883 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
884 mutex_unlock(&dev->mdio_lock);
887 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
889 struct emac_instance *dev = netdev_priv(ndev);
892 res = __emac_mdio_read((dev->mdio_instance &&
893 dev->phy.gpcs_address != id) ?
894 dev->mdio_instance : dev,
899 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
901 struct emac_instance *dev = netdev_priv(ndev);
903 __emac_mdio_write((dev->mdio_instance &&
904 dev->phy.gpcs_address != id) ?
905 dev->mdio_instance : dev,
906 (u8) id, (u8) reg, (u16) val);
910 static void __emac_set_multicast_list(struct emac_instance *dev)
912 struct emac_regs __iomem *p = dev->emacp;
913 u32 rmr = emac_iff2rmr(dev->ndev);
915 DBG(dev, "__multicast %08x" NL, rmr);
	/* I decided to relax register access rules here to avoid
	 * a full EMAC reset.
	 *
	 * There is a real problem with the EMAC4 core if we use the MWSW_001
	 * bit in the MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in a TX hang (it'll be recovered by the TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do a full TX reset or try to cheat here :)
	 *
	 * The only required change is to the RX mode register, so I *think*
	 * all we need is just to stop the RX channel. This seems to work on
	 * all tested hardware. --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
934 dev->mcast_pending = 0;
935 emac_rx_disable(dev);
936 if (rmr & EMAC_RMR_MAE)
938 out_be32(&p->rmr, rmr);
943 static void emac_set_multicast_list(struct net_device *ndev)
945 struct emac_instance *dev = netdev_priv(ndev);
947 DBG(dev, "multicast" NL);
949 BUG_ON(!netif_running(dev->ndev));
952 dev->mcast_pending = 1;
955 __emac_set_multicast_list(dev);
958 static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
960 int rx_sync_size = emac_rx_sync_size(new_mtu);
961 int rx_skb_size = emac_rx_skb_size(new_mtu);
964 mutex_lock(&dev->link_lock);
965 emac_netif_stop(dev);
966 emac_rx_disable(dev);
967 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
969 if (dev->rx_sg_skb) {
970 ++dev->estats.rx_dropped_resize;
971 dev_kfree_skb(dev->rx_sg_skb);
972 dev->rx_sg_skb = NULL;
975 /* Make a first pass over RX ring and mark BDs ready, dropping
976 * non-processed packets on the way. We need this as a separate pass
977 * to simplify error recovery in the case of allocation failure later.
979 for (i = 0; i < NUM_RX_BUFF; ++i) {
980 if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
981 ++dev->estats.rx_dropped_resize;
983 dev->rx_desc[i].data_len = 0;
984 dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
985 (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
988 /* Reallocate RX ring only if bigger skb buffers are required */
989 if (rx_skb_size <= dev->rx_skb_size)
992 /* Second pass, allocate new skbs */
993 for (i = 0; i < NUM_RX_BUFF; ++i) {
994 struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
1000 BUG_ON(!dev->rx_skb[i]);
1001 dev_kfree_skb(dev->rx_skb[i]);
1003 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
1004 dev->rx_desc[i].data_ptr =
1005 dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
1006 DMA_FROM_DEVICE) + 2;
1007 dev->rx_skb[i] = skb;
1010 /* Check if we need to change "Jumbo" bit in MR1 */
1011 if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
1012 /* This is to prevent starting RX channel in emac_rx_enable() */
1013 set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1015 dev->ndev->mtu = new_mtu;
1016 emac_full_tx_reset(dev);
1019 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
1022 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1024 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1025 emac_rx_enable(dev);
1026 emac_netif_start(dev);
1027 mutex_unlock(&dev->link_lock);
1032 /* Process ctx, rtnl_lock semaphore */
1033 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1035 struct emac_instance *dev = netdev_priv(ndev);
1038 if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
1041 DBG(dev, "change_mtu(%d)" NL, new_mtu);
1043 if (netif_running(ndev)) {
		/* Check if we really need to reinitialize the RX ring */
1045 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1046 ret = emac_resize_rx_ring(dev, new_mtu);
1050 ndev->mtu = new_mtu;
1051 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1052 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
1058 static void emac_clean_tx_ring(struct emac_instance *dev)
1062 for (i = 0; i < NUM_TX_BUFF; ++i) {
1063 if (dev->tx_skb[i]) {
1064 dev_kfree_skb(dev->tx_skb[i]);
1065 dev->tx_skb[i] = NULL;
1066 if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1067 ++dev->estats.tx_dropped;
1069 dev->tx_desc[i].ctrl = 0;
1070 dev->tx_desc[i].data_ptr = 0;
1074 static void emac_clean_rx_ring(struct emac_instance *dev)
1078 for (i = 0; i < NUM_RX_BUFF; ++i)
1079 if (dev->rx_skb[i]) {
1080 dev->rx_desc[i].ctrl = 0;
1081 dev_kfree_skb(dev->rx_skb[i]);
1082 dev->rx_skb[i] = NULL;
1083 dev->rx_desc[i].data_ptr = 0;
1086 if (dev->rx_sg_skb) {
1087 dev_kfree_skb(dev->rx_sg_skb);
1088 dev->rx_sg_skb = NULL;
1092 static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
1095 struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
1099 dev->rx_skb[slot] = skb;
1100 dev->rx_desc[slot].data_len = 0;
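	/* The extra 2 bytes reserved below (and the matching "+ 2" on the DMA
	 * address) are, as far as I can tell, there to offset the 14-byte
	 * Ethernet header so that the IP header which follows ends up 4-byte
	 * aligned.
	 */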
1102 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
1103 dev->rx_desc[slot].data_ptr =
1104 dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
1105 DMA_FROM_DEVICE) + 2;
1107 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1108 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1113 static void emac_print_link_status(struct emac_instance *dev)
1115 if (netif_carrier_ok(dev->ndev))
1116 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1117 dev->ndev->name, dev->phy.speed,
1118 dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1119 dev->phy.pause ? ", pause enabled" :
1120 dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1122 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1125 /* Process ctx, rtnl_lock semaphore */
1126 static int emac_open(struct net_device *ndev)
1128 struct emac_instance *dev = netdev_priv(ndev);
1131 DBG(dev, "open" NL);
1133 /* Setup error IRQ handler */
1134 err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
1136 printk(KERN_ERR "%s: failed to request IRQ %d\n",
1137 ndev->name, dev->emac_irq);
1141 /* Allocate RX ring */
1142 for (i = 0; i < NUM_RX_BUFF; ++i)
1143 if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
1144 printk(KERN_ERR "%s: failed to allocate RX ring\n",
1149 dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
1150 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1151 dev->rx_sg_skb = NULL;
1153 mutex_lock(&dev->link_lock);
1156 /* Start PHY polling now.
1158 if (dev->phy.address >= 0) {
1159 int link_poll_interval;
1160 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1161 dev->phy.def->ops->read_link(&dev->phy);
1162 emac_rx_clk_default(dev);
1163 netif_carrier_on(dev->ndev);
1164 link_poll_interval = PHY_POLL_LINK_ON;
1166 emac_rx_clk_tx(dev);
1167 netif_carrier_off(dev->ndev);
1168 link_poll_interval = PHY_POLL_LINK_OFF;
1170 dev->link_polling = 1;
1172 schedule_delayed_work(&dev->link_work, link_poll_interval);
1173 emac_print_link_status(dev);
1175 netif_carrier_on(dev->ndev);
1177 /* Required for Pause packet support in EMAC */
1178 dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);
1180 emac_configure(dev);
1181 mal_poll_add(dev->mal, &dev->commac);
1182 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
1183 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
1184 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1185 emac_tx_enable(dev);
1186 emac_rx_enable(dev);
1187 emac_netif_start(dev);
1189 mutex_unlock(&dev->link_lock);
1193 emac_clean_rx_ring(dev);
1194 free_irq(dev->emac_irq, dev);
1201 static int emac_link_differs(struct emac_instance *dev)
1203 u32 r = in_be32(&dev->emacp->mr1);
1205 int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
1206 int speed, pause, asym_pause;
1208 if (r & EMAC_MR1_MF_1000)
1210 else if (r & EMAC_MR1_MF_100)
1215 switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
1216 case (EMAC_MR1_EIFC | EMAC_MR1_APP):
1225 pause = asym_pause = 0;
1227 return speed != dev->phy.speed || duplex != dev->phy.duplex ||
1228 pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
1232 static void emac_link_timer(struct work_struct *work)
1234 struct emac_instance *dev =
1235 container_of(to_delayed_work(work),
1236 struct emac_instance, link_work);
1237 int link_poll_interval;
1239 mutex_lock(&dev->link_lock);
1240 DBG2(dev, "link timer" NL);
1245 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1246 if (!netif_carrier_ok(dev->ndev)) {
1247 emac_rx_clk_default(dev);
1248 /* Get new link parameters */
1249 dev->phy.def->ops->read_link(&dev->phy);
1251 netif_carrier_on(dev->ndev);
1252 emac_netif_stop(dev);
1253 emac_full_tx_reset(dev);
1254 emac_netif_start(dev);
1255 emac_print_link_status(dev);
1257 link_poll_interval = PHY_POLL_LINK_ON;
1259 if (netif_carrier_ok(dev->ndev)) {
1260 emac_rx_clk_tx(dev);
1261 netif_carrier_off(dev->ndev);
1262 netif_tx_disable(dev->ndev);
1263 emac_reinitialize(dev);
1264 emac_print_link_status(dev);
1266 link_poll_interval = PHY_POLL_LINK_OFF;
1268 schedule_delayed_work(&dev->link_work, link_poll_interval);
1270 mutex_unlock(&dev->link_lock);
1273 static void emac_force_link_update(struct emac_instance *dev)
1275 netif_carrier_off(dev->ndev);
1277 if (dev->link_polling) {
1278 cancel_rearming_delayed_work(&dev->link_work);
1279 if (dev->link_polling)
1280 schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
1284 /* Process ctx, rtnl_lock semaphore */
1285 static int emac_close(struct net_device *ndev)
1287 struct emac_instance *dev = netdev_priv(ndev);
1289 DBG(dev, "close" NL);
1291 if (dev->phy.address >= 0) {
1292 dev->link_polling = 0;
1293 cancel_rearming_delayed_work(&dev->link_work);
1295 mutex_lock(&dev->link_lock);
1296 emac_netif_stop(dev);
1298 mutex_unlock(&dev->link_lock);
1300 emac_rx_disable(dev);
1301 emac_tx_disable(dev);
1302 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
1303 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
1304 mal_poll_del(dev->mal, &dev->commac);
1306 emac_clean_tx_ring(dev);
1307 emac_clean_rx_ring(dev);
1309 free_irq(dev->emac_irq, dev);
1311 netif_carrier_off(ndev);
1316 static inline u16 emac_tx_csum(struct emac_instance *dev,
1317 struct sk_buff *skb)
1319 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1320 (skb->ip_summed == CHECKSUM_PARTIAL)) {
1321 ++dev->stats.tx_packets_csum;
1322 return EMAC_TX_CTRL_TAH_CSUM;
1327 static inline int emac_xmit_finish(struct emac_instance *dev, int len)
1329 struct emac_regs __iomem *p = dev->emacp;
1330 struct net_device *ndev = dev->ndev;
	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * instead.
	 */
1336 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1337 out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
1339 out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1341 if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1342 netif_stop_queue(ndev);
1343 DBG2(dev, "stopped TX queue" NL);
1346 ndev->trans_start = jiffies;
1347 ++dev->stats.tx_packets;
1348 dev->stats.tx_bytes += len;
1350 return NETDEV_TX_OK;
1354 static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1356 struct emac_instance *dev = netdev_priv(ndev);
1357 unsigned int len = skb->len;
1360 u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1361 MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
1363 slot = dev->tx_slot++;
1364 if (dev->tx_slot == NUM_TX_BUFF) {
1366 ctrl |= MAL_TX_CTRL_WRAP;
1369 DBG2(dev, "xmit(%u) %d" NL, len, slot);
1371 dev->tx_skb[slot] = skb;
1372 dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
1375 dev->tx_desc[slot].data_len = (u16) len;
1377 dev->tx_desc[slot].ctrl = ctrl;
1379 return emac_xmit_finish(dev, len);
1382 static inline int emac_xmit_split(struct emac_instance *dev, int slot,
1383 u32 pd, int len, int last, u16 base_ctrl)
1386 u16 ctrl = base_ctrl;
1387 int chunk = min(len, MAL_MAX_TX_SIZE);
1390 slot = (slot + 1) % NUM_TX_BUFF;
1393 ctrl |= MAL_TX_CTRL_LAST;
1394 if (slot == NUM_TX_BUFF - 1)
1395 ctrl |= MAL_TX_CTRL_WRAP;
1397 dev->tx_skb[slot] = NULL;
1398 dev->tx_desc[slot].data_ptr = pd;
1399 dev->tx_desc[slot].data_len = (u16) chunk;
1400 dev->tx_desc[slot].ctrl = ctrl;
1411 /* Tx lock BH disabled (SG version for TAH equipped EMACs) */
1412 static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
1414 struct emac_instance *dev = netdev_priv(ndev);
1415 int nr_frags = skb_shinfo(skb)->nr_frags;
1416 int len = skb->len, chunk;
1421 /* This is common "fast" path */
1422 if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
1423 return emac_start_xmit(skb, ndev);
1425 len -= skb->data_len;
	/* Note that this is only an *estimate*; we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks.
	 */
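	/* For example (illustrative numbers only): assuming MAL_MAX_TX_SIZE is
	 * 4095, a 9000-byte linear area would occupy three BDs of 4095, 4095
	 * and 810 bytes (the first set up here, the rest via emac_xmit_split()
	 * above).
	 */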
1431 if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
1434 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1435 emac_tx_csum(dev, skb);
1436 slot = dev->tx_slot;
1439 dev->tx_skb[slot] = NULL;
1440 chunk = min(len, MAL_MAX_TX_SIZE);
1441 dev->tx_desc[slot].data_ptr = pd =
1442 dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
1443 dev->tx_desc[slot].data_len = (u16) chunk;
1446 slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
1449 for (i = 0; i < nr_frags; ++i) {
1450 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1453 if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
1456 pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
1459 slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
1463 DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);
1465 /* Attach skb to the last slot so we don't release it too early */
1466 dev->tx_skb[slot] = skb;
1468 /* Send the packet out */
1469 if (dev->tx_slot == NUM_TX_BUFF - 1)
1470 ctrl |= MAL_TX_CTRL_WRAP;
1472 dev->tx_desc[dev->tx_slot].ctrl = ctrl;
1473 dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
1475 return emac_xmit_finish(dev, skb->len);
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
1481 while (slot != dev->tx_slot) {
1482 dev->tx_desc[slot].ctrl = 0;
1485 slot = NUM_TX_BUFF - 1;
1487 ++dev->estats.tx_undo;
1490 netif_stop_queue(ndev);
1491 DBG2(dev, "stopped TX queue" NL);
1492 return NETDEV_TX_BUSY;
1496 static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1498 struct emac_error_stats *st = &dev->estats;
1500 DBG(dev, "BD TX error %04x" NL, ctrl);
1503 if (ctrl & EMAC_TX_ST_BFCS)
1504 ++st->tx_bd_bad_fcs;
1505 if (ctrl & EMAC_TX_ST_LCS)
1506 ++st->tx_bd_carrier_loss;
1507 if (ctrl & EMAC_TX_ST_ED)
1508 ++st->tx_bd_excessive_deferral;
1509 if (ctrl & EMAC_TX_ST_EC)
1510 ++st->tx_bd_excessive_collisions;
1511 if (ctrl & EMAC_TX_ST_LC)
1512 ++st->tx_bd_late_collision;
1513 if (ctrl & EMAC_TX_ST_MC)
1514 ++st->tx_bd_multple_collisions;
1515 if (ctrl & EMAC_TX_ST_SC)
1516 ++st->tx_bd_single_collision;
1517 if (ctrl & EMAC_TX_ST_UR)
1518 ++st->tx_bd_underrun;
1519 if (ctrl & EMAC_TX_ST_SQE)
1523 static void emac_poll_tx(void *param)
1525 struct emac_instance *dev = param;
1528 DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);
1530 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
1531 bad_mask = EMAC_IS_BAD_TX_TAH;
1533 bad_mask = EMAC_IS_BAD_TX;
1535 netif_tx_lock_bh(dev->ndev);
1538 int slot = dev->ack_slot, n = 0;
1540 ctrl = dev->tx_desc[slot].ctrl;
1541 if (!(ctrl & MAL_TX_CTRL_READY)) {
1542 struct sk_buff *skb = dev->tx_skb[slot];
1547 dev->tx_skb[slot] = NULL;
1549 slot = (slot + 1) % NUM_TX_BUFF;
1551 if (unlikely(ctrl & bad_mask))
1552 emac_parse_tx_error(dev, ctrl);
1558 dev->ack_slot = slot;
1559 if (netif_queue_stopped(dev->ndev) &&
1560 dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
1561 netif_wake_queue(dev->ndev);
1563 DBG2(dev, "tx %d pkts" NL, n);
1566 netif_tx_unlock_bh(dev->ndev);
1569 static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
1572 struct sk_buff *skb = dev->rx_skb[slot];
1574 DBG2(dev, "recycle %d %d" NL, slot, len);
1577 dma_map_single(&dev->ofdev->dev, skb->data - 2,
1578 EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
1580 dev->rx_desc[slot].data_len = 0;
1582 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1583 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1586 static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1588 struct emac_error_stats *st = &dev->estats;
1590 DBG(dev, "BD RX error %04x" NL, ctrl);
1593 if (ctrl & EMAC_RX_ST_OE)
1594 ++st->rx_bd_overrun;
1595 if (ctrl & EMAC_RX_ST_BP)
1596 ++st->rx_bd_bad_packet;
1597 if (ctrl & EMAC_RX_ST_RP)
1598 ++st->rx_bd_runt_packet;
1599 if (ctrl & EMAC_RX_ST_SE)
1600 ++st->rx_bd_short_event;
1601 if (ctrl & EMAC_RX_ST_AE)
1602 ++st->rx_bd_alignment_error;
1603 if (ctrl & EMAC_RX_ST_BFCS)
1604 ++st->rx_bd_bad_fcs;
1605 if (ctrl & EMAC_RX_ST_PTL)
1606 ++st->rx_bd_packet_too_long;
1607 if (ctrl & EMAC_RX_ST_ORE)
1608 ++st->rx_bd_out_of_range;
1609 if (ctrl & EMAC_RX_ST_IRE)
1610 ++st->rx_bd_in_range;
1613 static inline void emac_rx_csum(struct emac_instance *dev,
1614 struct sk_buff *skb, u16 ctrl)
1616 #ifdef CONFIG_IBM_NEW_EMAC_TAH
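	/* By the time we get here ctrl has been masked with EMAC_BAD_RX_MASK,
	 * so a value of zero means the TAH found nothing wrong with the
	 * checksum and we can mark the skb as already verified.
	 */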
1617 if (!ctrl && dev->tah_dev) {
1618 skb->ip_summed = CHECKSUM_UNNECESSARY;
1619 ++dev->stats.rx_packets_csum;
1624 static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
1626 if (likely(dev->rx_sg_skb != NULL)) {
1627 int len = dev->rx_desc[slot].data_len;
1628 int tot_len = dev->rx_sg_skb->len + len;
1630 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1631 ++dev->estats.rx_dropped_mtu;
1632 dev_kfree_skb(dev->rx_sg_skb);
1633 dev->rx_sg_skb = NULL;
1635 cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
1636 dev->rx_skb[slot]->data, len);
1637 skb_put(dev->rx_sg_skb, len);
1638 emac_recycle_rx_skb(dev, slot, len);
1642 emac_recycle_rx_skb(dev, slot, 0);
1646 /* NAPI poll context */
1647 static int emac_poll_rx(void *param, int budget)
1649 struct emac_instance *dev = param;
1650 int slot = dev->rx_slot, received = 0;
1652 DBG2(dev, "poll_rx(%d)" NL, budget);
1655 while (budget > 0) {
1657 struct sk_buff *skb;
1658 u16 ctrl = dev->rx_desc[slot].ctrl;
1660 if (ctrl & MAL_RX_CTRL_EMPTY)
1663 skb = dev->rx_skb[slot];
1665 len = dev->rx_desc[slot].data_len;
1667 if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1670 ctrl &= EMAC_BAD_RX_MASK;
1671 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1672 emac_parse_rx_error(dev, ctrl);
1673 ++dev->estats.rx_dropped_error;
1674 emac_recycle_rx_skb(dev, slot, 0);
1679 if (len < ETH_HLEN) {
1680 ++dev->estats.rx_dropped_stack;
1681 emac_recycle_rx_skb(dev, slot, len);
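		/* Packets below EMAC_RX_COPY_THRESH are copied into a freshly
		 * allocated small skb so the original ring buffer can be
		 * recycled immediately; larger packets are passed up as-is and
		 * their ring slot is refilled with a new allocation below.
		 */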
1685 if (len && len < EMAC_RX_COPY_THRESH) {
1686 struct sk_buff *copy_skb =
1687 alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
1688 if (unlikely(!copy_skb))
1691 skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
1692 cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
1694 emac_recycle_rx_skb(dev, slot, len);
1696 } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
1701 skb->dev = dev->ndev;
1702 skb->protocol = eth_type_trans(skb, dev->ndev);
1703 emac_rx_csum(dev, skb, ctrl);
1705 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1706 ++dev->estats.rx_dropped_stack;
1708 ++dev->stats.rx_packets;
1710 dev->stats.rx_bytes += len;
1711 slot = (slot + 1) % NUM_RX_BUFF;
1716 if (ctrl & MAL_RX_CTRL_FIRST) {
1717 BUG_ON(dev->rx_sg_skb);
1718 if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
1719 DBG(dev, "rx OOM %d" NL, slot);
1720 ++dev->estats.rx_dropped_oom;
1721 emac_recycle_rx_skb(dev, slot, 0);
1723 dev->rx_sg_skb = skb;
1726 } else if (!emac_rx_sg_append(dev, slot) &&
1727 (ctrl & MAL_RX_CTRL_LAST)) {
1729 skb = dev->rx_sg_skb;
1730 dev->rx_sg_skb = NULL;
1732 ctrl &= EMAC_BAD_RX_MASK;
1733 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1734 emac_parse_rx_error(dev, ctrl);
1735 ++dev->estats.rx_dropped_error;
1743 DBG(dev, "rx OOM %d" NL, slot);
1744 /* Drop the packet and recycle skb */
1745 ++dev->estats.rx_dropped_oom;
1746 emac_recycle_rx_skb(dev, slot, 0);
1751 DBG2(dev, "rx %d BDs" NL, received);
1752 dev->rx_slot = slot;
1755 if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
1757 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1758 DBG2(dev, "rx restart" NL);
1763 if (dev->rx_sg_skb) {
1764 DBG2(dev, "dropping partial rx packet" NL);
1765 ++dev->estats.rx_dropped_error;
1766 dev_kfree_skb(dev->rx_sg_skb);
1767 dev->rx_sg_skb = NULL;
1770 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1771 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1772 emac_rx_enable(dev);
1778 /* NAPI poll context */
1779 static int emac_peek_rx(void *param)
1781 struct emac_instance *dev = param;
1783 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1786 /* NAPI poll context */
1787 static int emac_peek_rx_sg(void *param)
1789 struct emac_instance *dev = param;
1791 int slot = dev->rx_slot;
1793 u16 ctrl = dev->rx_desc[slot].ctrl;
1794 if (ctrl & MAL_RX_CTRL_EMPTY)
1796 else if (ctrl & MAL_RX_CTRL_LAST)
1799 slot = (slot + 1) % NUM_RX_BUFF;
1801 /* I'm just being paranoid here :) */
1802 if (unlikely(slot == dev->rx_slot))
1808 static void emac_rxde(void *param)
1810 struct emac_instance *dev = param;
1812 ++dev->estats.rx_stopped;
1813 emac_rx_disable_async(dev);
1817 static irqreturn_t emac_irq(int irq, void *dev_instance)
1819 struct emac_instance *dev = dev_instance;
1820 struct emac_regs __iomem *p = dev->emacp;
1821 struct emac_error_stats *st = &dev->estats;
1824 spin_lock(&dev->lock);
1826 isr = in_be32(&p->isr);
1827 out_be32(&p->isr, isr);
1829 DBG(dev, "isr = %08x" NL, isr);
1831 if (isr & EMAC4_ISR_TXPE)
1833 if (isr & EMAC4_ISR_RXPE)
1835 if (isr & EMAC4_ISR_TXUE)
1837 if (isr & EMAC4_ISR_RXOE)
1838 ++st->rx_fifo_overrun;
1839 if (isr & EMAC_ISR_OVR)
1841 if (isr & EMAC_ISR_BP)
1842 ++st->rx_bad_packet;
1843 if (isr & EMAC_ISR_RP)
1844 ++st->rx_runt_packet;
1845 if (isr & EMAC_ISR_SE)
1846 ++st->rx_short_event;
1847 if (isr & EMAC_ISR_ALE)
1848 ++st->rx_alignment_error;
1849 if (isr & EMAC_ISR_BFCS)
1851 if (isr & EMAC_ISR_PTLE)
1852 ++st->rx_packet_too_long;
1853 if (isr & EMAC_ISR_ORE)
1854 ++st->rx_out_of_range;
1855 if (isr & EMAC_ISR_IRE)
1857 if (isr & EMAC_ISR_SQE)
1859 if (isr & EMAC_ISR_TE)
1862 spin_unlock(&dev->lock);
1867 static struct net_device_stats *emac_stats(struct net_device *ndev)
1869 struct emac_instance *dev = netdev_priv(ndev);
1870 struct emac_stats *st = &dev->stats;
1871 struct emac_error_stats *est = &dev->estats;
1872 struct net_device_stats *nst = &dev->nstats;
1873 unsigned long flags;
1875 DBG2(dev, "stats" NL);
1877 /* Compute "legacy" statistics */
1878 spin_lock_irqsave(&dev->lock, flags);
1879 nst->rx_packets = (unsigned long)st->rx_packets;
1880 nst->rx_bytes = (unsigned long)st->rx_bytes;
1881 nst->tx_packets = (unsigned long)st->tx_packets;
1882 nst->tx_bytes = (unsigned long)st->tx_bytes;
1883 nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1884 est->rx_dropped_error +
1885 est->rx_dropped_resize +
1886 est->rx_dropped_mtu);
1887 nst->tx_dropped = (unsigned long)est->tx_dropped;
1889 nst->rx_errors = (unsigned long)est->rx_bd_errors;
1890 nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1891 est->rx_fifo_overrun +
1893 nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1894 est->rx_alignment_error);
1895 nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1897 nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1898 est->rx_bd_short_event +
1899 est->rx_bd_packet_too_long +
1900 est->rx_bd_out_of_range +
1901 est->rx_bd_in_range +
1902 est->rx_runt_packet +
1903 est->rx_short_event +
1904 est->rx_packet_too_long +
1905 est->rx_out_of_range +
1908 nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1909 nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1911 nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1912 nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1913 est->tx_bd_excessive_collisions +
1914 est->tx_bd_late_collision +
1915 est->tx_bd_multple_collisions);
1916 spin_unlock_irqrestore(&dev->lock, flags);
1920 static struct mal_commac_ops emac_commac_ops = {
1921 .poll_tx = &emac_poll_tx,
1922 .poll_rx = &emac_poll_rx,
1923 .peek_rx = &emac_peek_rx,
1927 static struct mal_commac_ops emac_commac_sg_ops = {
1928 .poll_tx = &emac_poll_tx,
1929 .poll_rx = &emac_poll_rx,
1930 .peek_rx = &emac_peek_rx_sg,
1934 /* Ethtool support */
1935 static int emac_ethtool_get_settings(struct net_device *ndev,
1936 struct ethtool_cmd *cmd)
1938 struct emac_instance *dev = netdev_priv(ndev);
1940 cmd->supported = dev->phy.features;
1941 cmd->port = PORT_MII;
1942 cmd->phy_address = dev->phy.address;
1944 dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1946 mutex_lock(&dev->link_lock);
1947 cmd->advertising = dev->phy.advertising;
1948 cmd->autoneg = dev->phy.autoneg;
1949 cmd->speed = dev->phy.speed;
1950 cmd->duplex = dev->phy.duplex;
1951 mutex_unlock(&dev->link_lock);
1956 static int emac_ethtool_set_settings(struct net_device *ndev,
1957 struct ethtool_cmd *cmd)
1959 struct emac_instance *dev = netdev_priv(ndev);
1960 u32 f = dev->phy.features;
1962 DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
1963 cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1965 /* Basic sanity checks */
1966 if (dev->phy.address < 0)
1968 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1970 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1972 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1975 if (cmd->autoneg == AUTONEG_DISABLE) {
1976 switch (cmd->speed) {
1978 if (cmd->duplex == DUPLEX_HALF
1979 && !(f & SUPPORTED_10baseT_Half))
1981 if (cmd->duplex == DUPLEX_FULL
1982 && !(f & SUPPORTED_10baseT_Full))
1986 if (cmd->duplex == DUPLEX_HALF
1987 && !(f & SUPPORTED_100baseT_Half))
1989 if (cmd->duplex == DUPLEX_FULL
1990 && !(f & SUPPORTED_100baseT_Full))
1994 if (cmd->duplex == DUPLEX_HALF
1995 && !(f & SUPPORTED_1000baseT_Half))
1997 if (cmd->duplex == DUPLEX_FULL
1998 && !(f & SUPPORTED_1000baseT_Full))
2005 mutex_lock(&dev->link_lock);
2006 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
2008 mutex_unlock(&dev->link_lock);
2011 if (!(f & SUPPORTED_Autoneg))
2014 mutex_lock(&dev->link_lock);
2015 dev->phy.def->ops->setup_aneg(&dev->phy,
2016 (cmd->advertising & f) |
2017 (dev->phy.advertising &
2019 ADVERTISED_Asym_Pause)));
2020 mutex_unlock(&dev->link_lock);
2022 emac_force_link_update(dev);
2027 static void emac_ethtool_get_ringparam(struct net_device *ndev,
2028 struct ethtool_ringparam *rp)
2030 rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2031 rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
2034 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
2035 struct ethtool_pauseparam *pp)
2037 struct emac_instance *dev = netdev_priv(ndev);
2039 mutex_lock(&dev->link_lock);
2040 if ((dev->phy.features & SUPPORTED_Autoneg) &&
2041 (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
2044 if (dev->phy.duplex == DUPLEX_FULL) {
2046 pp->rx_pause = pp->tx_pause = 1;
2047 else if (dev->phy.asym_pause)
2050 mutex_unlock(&dev->link_lock);
2053 static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
2055 struct emac_instance *dev = netdev_priv(ndev);
2057 return dev->tah_dev != NULL;
2060 static int emac_get_regs_len(struct emac_instance *dev)
2062 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
2063 return sizeof(struct emac_ethtool_regs_subhdr) +
2064 EMAC4_ETHTOOL_REGS_SIZE(dev);
2066 return sizeof(struct emac_ethtool_regs_subhdr) +
2067 EMAC_ETHTOOL_REGS_SIZE(dev);
2070 static int emac_ethtool_get_regs_len(struct net_device *ndev)
2072 struct emac_instance *dev = netdev_priv(ndev);
2075 size = sizeof(struct emac_ethtool_regs_hdr) +
2076 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2077 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2078 size += zmii_get_regs_len(dev->zmii_dev);
2079 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2080 size += rgmii_get_regs_len(dev->rgmii_dev);
2081 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2082 size += tah_get_regs_len(dev->tah_dev);
2087 static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2089 struct emac_ethtool_regs_subhdr *hdr = buf;
2091 hdr->index = dev->cell_index;
2092 if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2093 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2094 memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
2095 return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev));
2097 hdr->version = EMAC_ETHTOOL_REGS_VER;
2098 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
2099 return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev));
2103 static void emac_ethtool_get_regs(struct net_device *ndev,
2104 struct ethtool_regs *regs, void *buf)
2106 struct emac_instance *dev = netdev_priv(ndev);
2107 struct emac_ethtool_regs_hdr *hdr = buf;
2109 hdr->components = 0;
2112 buf = mal_dump_regs(dev->mal, buf);
2113 buf = emac_dump_regs(dev, buf);
2114 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2115 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2116 buf = zmii_dump_regs(dev->zmii_dev, buf);
2118 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2119 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2120 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2122 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2123 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2124 buf = tah_dump_regs(dev->tah_dev, buf);
2128 static int emac_ethtool_nway_reset(struct net_device *ndev)
2130 struct emac_instance *dev = netdev_priv(ndev);
2133 DBG(dev, "nway_reset" NL);
2135 if (dev->phy.address < 0)
2138 mutex_lock(&dev->link_lock);
2139 if (!dev->phy.autoneg) {
2144 dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2146 mutex_unlock(&dev->link_lock);
2147 emac_force_link_update(dev);
2151 static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
2153 if (stringset == ETH_SS_STATS)
2154 return EMAC_ETHTOOL_STATS_COUNT;
2159 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2162 if (stringset == ETH_SS_STATS)
2163 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2166 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2167 struct ethtool_stats *estats,
2170 struct emac_instance *dev = netdev_priv(ndev);
2172 memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2173 tmp_stats += sizeof(dev->stats) / sizeof(u64);
2174 memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2177 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2178 struct ethtool_drvinfo *info)
2180 struct emac_instance *dev = netdev_priv(ndev);
2182 strcpy(info->driver, "ibm_emac");
2183 strcpy(info->version, DRV_VERSION);
2184 info->fw_version[0] = '\0';
2185 sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
2186 dev->cell_index, dev->ofdev->node->full_name);
2187 info->regdump_len = emac_ethtool_get_regs_len(ndev);
2190 static const struct ethtool_ops emac_ethtool_ops = {
2191 .get_settings = emac_ethtool_get_settings,
2192 .set_settings = emac_ethtool_set_settings,
2193 .get_drvinfo = emac_ethtool_get_drvinfo,
2195 .get_regs_len = emac_ethtool_get_regs_len,
2196 .get_regs = emac_ethtool_get_regs,
2198 .nway_reset = emac_ethtool_nway_reset,
2200 .get_ringparam = emac_ethtool_get_ringparam,
2201 .get_pauseparam = emac_ethtool_get_pauseparam,
2203 .get_rx_csum = emac_ethtool_get_rx_csum,
2205 .get_strings = emac_ethtool_get_strings,
2206 .get_sset_count = emac_ethtool_get_sset_count,
2207 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2209 .get_link = ethtool_op_get_link,
2210 .get_tx_csum = ethtool_op_get_tx_csum,
2211 .get_sg = ethtool_op_get_sg,
2214 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2216 struct emac_instance *dev = netdev_priv(ndev);
2217 struct mii_ioctl_data *data = if_mii(rq);
2219 DBG(dev, "ioctl %08x" NL, cmd);
2221 if (dev->phy.address < 0)
2226 data->phy_id = dev->phy.address;
2229 data->val_out = emac_mdio_read(ndev, dev->phy.address,
2234 emac_mdio_write(ndev, dev->phy.address, data->reg_num,
2242 struct emac_depentry {
2244 struct device_node *node;
2245 struct of_device *ofdev;
2249 #define EMAC_DEP_MAL_IDX 0
2250 #define EMAC_DEP_ZMII_IDX 1
2251 #define EMAC_DEP_RGMII_IDX 2
2252 #define EMAC_DEP_TAH_IDX 3
2253 #define EMAC_DEP_MDIO_IDX 4
2254 #define EMAC_DEP_PREV_IDX 5
2255 #define EMAC_DEP_COUNT 6
2257 static int __devinit emac_check_deps(struct emac_instance *dev,
2258 struct emac_depentry *deps)
2261 struct device_node *np;
2263 for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, all right */
2265 if (deps[i].phandle == 0) {
2269 /* special case for blist as the dependency might go away */
2270 if (i == EMAC_DEP_PREV_IDX) {
2271 np = *(dev->blist - 1);
2273 deps[i].phandle = 0;
2277 if (deps[i].node == NULL)
2278 deps[i].node = of_node_get(np);
2280 if (deps[i].node == NULL)
2281 deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2282 if (deps[i].node == NULL)
2284 if (deps[i].ofdev == NULL)
2285 deps[i].ofdev = of_find_device_by_node(deps[i].node);
2286 if (deps[i].ofdev == NULL)
2288 if (deps[i].drvdata == NULL)
2289 deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
2290 if (deps[i].drvdata != NULL)
2293 return (there == EMAC_DEP_COUNT);
2296 static void emac_put_deps(struct emac_instance *dev)
2299 of_dev_put(dev->mal_dev);
2301 of_dev_put(dev->zmii_dev);
2303 of_dev_put(dev->rgmii_dev);
2305 of_dev_put(dev->mdio_dev);
2307 of_dev_put(dev->tah_dev);
2310 static int __devinit emac_of_bus_notify(struct notifier_block *nb,
2311 unsigned long action, void *data)
2313 /* We are only interested in devices being bound to their driver */
2314 if (action == BUS_NOTIFY_BOUND_DRIVER)
2315 wake_up_all(&emac_probe_wait);
2319 static struct notifier_block emac_of_bus_notifier __devinitdata = {
2320 .notifier_call = emac_of_bus_notify
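/* Wait (with a timeout) until every dependency above has probed: the
 * bus notifier wakes emac_probe_wait each time a driver binds on the
 * of_platform bus, and emac_check_deps() is re-evaluated until all
 * EMAC_DEP_COUNT entries are satisfied or EMAC_PROBE_DEP_TIMEOUT expires.
 */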
2323 static int __devinit emac_wait_deps(struct emac_instance *dev)
2325 struct emac_depentry deps[EMAC_DEP_COUNT];
2328 memset(&deps, 0, sizeof(deps));
2330 deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2331 deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2332 deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2334 deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2336 deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
2337 if (dev->blist && dev->blist > emac_boot_list)
2338 deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2339 bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2340 wait_event_timeout(emac_probe_wait,
2341 emac_check_deps(dev, deps),
2342 EMAC_PROBE_DEP_TIMEOUT);
2343 bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2344 err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2345 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2347 of_node_put(deps[i].node);
2348 if (err && deps[i].ofdev)
2349 of_dev_put(deps[i].ofdev);
2352 dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2353 dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2354 dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2355 dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2356 dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2358 if (deps[EMAC_DEP_PREV_IDX].ofdev)
2359 of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
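/* Helper to read a single u32 property from the device-tree. A missing
 * (or too short) property is reported only when the caller flags it as
 * fatal; optional properties just make the caller fall back to a default.
 */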
2363 static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2364 u32 *val, int fatal)
2367 const u32 *prop = of_get_property(np, name, &len);
2368 if (prop == NULL || len < sizeof(u32)) {
2370 printk(KERN_ERR "%s: missing %s property\n",
2371 np->full_name, name);
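/* PHY setup, roughly: handle the PHY-less case first, otherwise scan the
 * MDIO bus for a PHY (honouring busy_phy_map, the optional GPCS address
 * and the 440GX clock workaround), then either restart autonegotiation
 * or force the best speed/duplex the PHY supports.
 */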
2378 static int __devinit emac_init_phy(struct emac_instance *dev)
2380 struct device_node *np = dev->ofdev->node;
2381 struct net_device *ndev = dev->ndev;
2385 dev->phy.dev = ndev;
2386 dev->phy.mode = dev->phy_mode;
2388 /* PHY-less configuration.
2389 * XXX I probably should move these settings to the dev tree
2391 if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
2394 /* No PHY to talk to: fake the PHY settings below so the rest of
2395 * the driver still has sane link parameters to work with */
2397 dev->phy.address = -1;
2398 dev->phy.features = SUPPORTED_MII;
2399 if (emac_phy_supports_gige(dev->phy_mode))
2400 dev->phy.features |= SUPPORTED_1000baseT_Full;
2402 dev->phy.features |= SUPPORTED_100baseT_Full;
2408 mutex_lock(&emac_phy_map_lock);
2409 phy_map = dev->phy_map | busy_phy_map;
2411 DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2413 dev->phy.mdio_read = emac_mdio_read;
2414 dev->phy.mdio_write = emac_mdio_write;
2416 /* Enable internal clock source */
2417 #ifdef CONFIG_PPC_DCR_NATIVE
2418 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2419 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2421 /* PHY clock workaround */
2422 emac_rx_clk_tx(dev);
2424 /* Enable internal clock source on 440GX */
2425 #ifdef CONFIG_PPC_DCR_NATIVE
2426 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2427 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2429 /* Configure EMAC with defaults so we can at least use MDIO
2430 * This is needed mostly for 440GX
2432 if (emac_phy_gpcs(dev->phy.mode)) {
2434 * Make GPCS PHY address equal to EMAC index.
2435 * We probably should take into account busy_phy_map
2436 * and/or phy_map here.
2438 * Note that the busy_phy_map is currently global
2439 * while it should probably be per-ASIC...
2441 dev->phy.gpcs_address = dev->gpcs_address;
2442 if (dev->phy.gpcs_address == 0xffffffff)
2443 dev->phy.address = dev->cell_index;
2446 emac_configure(dev);
2448 if (dev->phy_address != 0xffffffff)
2449 phy_map = ~(1 << dev->phy_address);
2451 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2452 if (!(phy_map & 1)) {
2454 busy_phy_map |= 1 << i;
2456 /* Quick check if there is a PHY at the address */
2457 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2458 if (r == 0xffff || r < 0)
2460 if (!emac_mii_phy_probe(&dev->phy, i))
2464 /* Enable external clock source */
2465 #ifdef CONFIG_PPC_DCR_NATIVE
2466 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2467 dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2469 mutex_unlock(&emac_phy_map_lock);
2471 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
2476 if (dev->phy.def->ops->init)
2477 dev->phy.def->ops->init(&dev->phy);
2479 /* Disable any PHY features not supported by the platform */
2480 dev->phy.def->features &= ~dev->phy_feat_exc;
2482 /* Setup initial link parameters */
2483 if (dev->phy.features & SUPPORTED_Autoneg) {
2484 adv = dev->phy.features;
2485 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2486 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2487 /* Restart autonegotiation */
2488 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2490 u32 f = dev->phy.def->features;
2491 int speed = SPEED_10, fd = DUPLEX_HALF;
2493 /* Select highest supported speed/duplex */
2494 if (f & SUPPORTED_1000baseT_Full) {
2497 } else if (f & SUPPORTED_1000baseT_Half)
2499 else if (f & SUPPORTED_100baseT_Full) {
2502 } else if (f & SUPPORTED_100baseT_Half)
2504 else if (f & SUPPORTED_10baseT_Full)
2507 /* Force link parameters */
2508 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
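/* Pull the per-EMAC configuration out of the device-tree: MAL channels,
 * FIFO sizes, PHY wiring, etc. Optional properties get conservative
 * defaults and the feature bits are derived from the "compatible" strings
 * and the TAH/ZMII/RGMII phandles.
 *
 * Purely illustrative (made-up) node that this parser would accept; the
 * labels (MAL0, RGMII0) and the unit address are placeholders:
 *
 *	ethernet@ef600e00 {
 *		compatible = "ibm,emac4";
 *		cell-index = <0>;
 *		mal-device = <&MAL0>;
 *		mal-tx-channel = <0>;
 *		mal-rx-channel = <0>;
 *		max-frame-size = <1500>;
 *		rx-fifo-size = <2048>;
 *		tx-fifo-size = <2048>;
 *		phy-mode = "rgmii";
 *		phy-map = <0x00000000>;
 *		local-mac-address = [00 00 00 00 00 00];
 *		rgmii-device = <&RGMII0>;
 *		rgmii-channel = <0>;
 *	};
 */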
2513 static int __devinit emac_init_config(struct emac_instance *dev)
2515 struct device_node *np = dev->ofdev->node;
2518 const char *pm, *phy_modes[] = {
2520 [PHY_MODE_MII] = "mii",
2521 [PHY_MODE_RMII] = "rmii",
2522 [PHY_MODE_SMII] = "smii",
2523 [PHY_MODE_RGMII] = "rgmii",
2524 [PHY_MODE_TBI] = "tbi",
2525 [PHY_MODE_GMII] = "gmii",
2526 [PHY_MODE_RTBI] = "rtbi",
2527 [PHY_MODE_SGMII] = "sgmii",
2530 /* Read config from device-tree */
2531 if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2533 if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2535 if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2537 if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2539 if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2540 dev->max_mtu = 1500;
2541 if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2542 dev->rx_fifo_size = 2048;
2543 if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2544 dev->tx_fifo_size = 2048;
2545 if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2546 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2547 if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2548 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2549 if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2550 dev->phy_address = 0xffffffff;
2551 if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2552 dev->phy_map = 0xffffffff;
2553 if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
2554 dev->gpcs_address = 0xffffffff;
2555 if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2557 if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2559 if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2561 if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2563 if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2565 if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2566 dev->zmii_port = 0xffffffff;
2567 if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2569 if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2570 dev->rgmii_port = 0xffffffff;
2571 if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2572 dev->fifo_entry_size = 16;
2573 if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2574 dev->mal_burst_size = 256;
2576 /* PHY mode needs some decoding */
2577 dev->phy_mode = PHY_MODE_NA;
2578 pm = of_get_property(np, "phy-mode", &plen);
2581 for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
2582 if (!strcasecmp(pm, phy_modes[i])) {
2588 /* Backward compat with non-final DT */
2589 if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
2590 u32 nmode = *(const u32 *)pm;
2591 if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
2592 dev->phy_mode = nmode;
2595 /* Check EMAC version */
2596 if (of_device_is_compatible(np, "ibm,emac4sync")) {
2597 dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
2598 if (of_device_is_compatible(np, "ibm,emac-460ex") ||
2599 of_device_is_compatible(np, "ibm,emac-460gt"))
2600 dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
2601 if (of_device_is_compatible(np, "ibm,emac-405ex") ||
2602 of_device_is_compatible(np, "ibm,emac-405exr"))
2603 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2604 } else if (of_device_is_compatible(np, "ibm,emac4")) {
2605 dev->features |= EMAC_FTR_EMAC4;
2606 if (of_device_is_compatible(np, "ibm,emac-440gx"))
2607 dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2609 if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2610 of_device_is_compatible(np, "ibm,emac-440gr"))
2611 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2612 if (of_device_is_compatible(np, "ibm,emac-405ez")) {
2613 #ifdef CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL
2614 dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
2616 printk(KERN_ERR "%s: Flow control not disabled!\n",
2624 /* Fixup some feature bits based on the device tree */
2625 if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2626 dev->features |= EMAC_FTR_STACR_OC_INVERT;
2627 if (of_get_property(np, "has-new-stacr-staopc", NULL))
2628 dev->features |= EMAC_FTR_HAS_NEW_STACR;
2630 /* CAB lacks the appropriate properties */
2631 if (of_device_is_compatible(np, "ibm,emac-axon"))
2632 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2633 EMAC_FTR_STACR_OC_INVERT;
2635 /* Enable TAH/ZMII/RGMII features as found */
2636 if (dev->tah_ph != 0) {
2637 #ifdef CONFIG_IBM_NEW_EMAC_TAH
2638 dev->features |= EMAC_FTR_HAS_TAH;
2640 printk(KERN_ERR "%s: TAH support not enabled!\n",
2646 if (dev->zmii_ph != 0) {
2647 #ifdef CONFIG_IBM_NEW_EMAC_ZMII
2648 dev->features |= EMAC_FTR_HAS_ZMII;
2650 printk(KERN_ERR "%s: ZMII support not enabled!\n",
2656 if (dev->rgmii_ph != 0) {
2657 #ifdef CONFIG_IBM_NEW_EMAC_RGMII
2658 dev->features |= EMAC_FTR_HAS_RGMII;
2660 printk(KERN_ERR "%s: RGMII support not enabled!\n",
2666 /* Read MAC-address */
2667 p = of_get_property(np, "local-mac-address", NULL);
2669 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2673 memcpy(dev->ndev->dev_addr, p, 6);
2675 /* IAHT and GAHT filter parameterization */
2676 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2677 dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
2678 dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
2680 dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
2681 dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
2684 DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2685 DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2686 DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2687 DBG(dev, "max_mtu : %d\n", dev->max_mtu);
2688 DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
2693 static const struct net_device_ops emac_netdev_ops = {
2694 .ndo_open = emac_open,
2695 .ndo_stop = emac_close,
2696 .ndo_get_stats = emac_stats,
2697 .ndo_set_multicast_list = emac_set_multicast_list,
2698 .ndo_do_ioctl = emac_ioctl,
2699 .ndo_tx_timeout = emac_tx_timeout,
2700 .ndo_validate_addr = eth_validate_addr,
2701 .ndo_set_mac_address = eth_mac_addr,
2702 .ndo_start_xmit = emac_start_xmit,
2703 .ndo_change_mtu = eth_change_mtu,
2706 static const struct net_device_ops emac_gige_netdev_ops = {
2707 .ndo_open = emac_open,
2708 .ndo_stop = emac_close,
2709 .ndo_get_stats = emac_stats,
2710 .ndo_set_multicast_list = emac_set_multicast_list,
2711 .ndo_do_ioctl = emac_ioctl,
2712 .ndo_tx_timeout = emac_tx_timeout,
2713 .ndo_validate_addr = eth_validate_addr,
2714 .ndo_set_mac_address = eth_mac_addr,
2715 .ndo_start_xmit = emac_start_xmit_sg,
2716 .ndo_change_mtu = emac_change_mtu,
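/* Probe: parse the device-tree config, map the IRQs and registers, wait
 * for the MAL/ZMII/RGMII/TAH/MDIO dependencies, register with the MAL,
 * attach the bridges, locate the PHY and finally register the net_device.
 * The error paths unwind all of this in reverse order.
 */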
2719 static int __devinit emac_probe(struct of_device *ofdev,
2720 const struct of_device_id *match)
2722 struct net_device *ndev;
2723 struct emac_instance *dev;
2724 struct device_node *np = ofdev->node;
2725 struct device_node **blist = NULL;
2728 /* Skip unused/unwired EMACs. We leave the check for an unused
2729 * property here for now, but new flat device trees should set a
2730 * status property to "disabled" instead.
2732 if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2735 /* Find ourselves in the bootlist if we are there */
2736 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2737 if (emac_boot_list[i] == np)
2738 blist = &emac_boot_list[i];
2740 /* Allocate our net_device structure */
2742 ndev = alloc_etherdev(sizeof(struct emac_instance));
2744 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2748 dev = netdev_priv(ndev);
2752 SET_NETDEV_DEV(ndev, &ofdev->dev);
2754 /* Initialize some embedded data structures */
2755 mutex_init(&dev->mdio_lock);
2756 mutex_init(&dev->link_lock);
2757 spin_lock_init(&dev->lock);
2758 INIT_WORK(&dev->reset_work, emac_reset_work);
2760 /* Init various config data based on device-tree */
2761 err = emac_init_config(dev);
2765 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2766 dev->emac_irq = irq_of_parse_and_map(np, 0);
2767 dev->wol_irq = irq_of_parse_and_map(np, 1);
2768 if (dev->emac_irq == NO_IRQ) {
2769 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2772 ndev->irq = dev->emac_irq;
2775 if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2776 printk(KERN_ERR "%s: Can't get registers address\n",
2780 /* TODO: request_mem_region */
2781 dev->emacp = ioremap(dev->rsrc_regs.start,
2782 dev->rsrc_regs.end - dev->rsrc_regs.start + 1);
2783 if (dev->emacp == NULL) {
2784 printk(KERN_ERR "%s: Can't map device registers!\n",
2790 /* Wait for dependent devices */
2791 err = emac_wait_deps(dev);
2794 "%s: Timeout waiting for dependent devices\n",
2796 /* display more info about what's missing? */
2799 dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2800 if (dev->mdio_dev != NULL)
2801 dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2803 /* Register with MAL */
2804 dev->commac.ops = &emac_commac_ops;
2805 dev->commac.dev = dev;
2806 dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2807 dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2808 err = mal_register_commac(dev->mal, &dev->commac);
2810 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2811 np->full_name, dev->mal_dev->node->full_name);
2814 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2815 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2817 /* Get pointers to BD rings */
2819 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2821 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2823 DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2824 DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2827 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2828 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2829 memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
2830 memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
2832 /* Attach to ZMII, if needed */
2833 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2834 (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2835 goto err_unreg_commac;
2837 /* Attach to RGMII, if needed */
2838 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2839 (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2840 goto err_detach_zmii;
2842 /* Attach to TAH, if needed */
2843 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2844 (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2845 goto err_detach_rgmii;
2847 /* Set some link defaults before we can find out real parameters */
2848 dev->phy.speed = SPEED_100;
2849 dev->phy.duplex = DUPLEX_FULL;
2850 dev->phy.autoneg = AUTONEG_DISABLE;
2851 dev->phy.pause = dev->phy.asym_pause = 0;
2852 dev->stop_timeout = STOP_TIMEOUT_100;
2853 INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2855 /* Find PHY if any */
2856 err = emac_init_phy(dev);
2858 goto err_detach_tah;
2861 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2862 ndev->watchdog_timeo = 5 * HZ;
2863 if (emac_phy_supports_gige(dev->phy_mode)) {
2864 ndev->netdev_ops = &emac_gige_netdev_ops;
2865 dev->commac.ops = &emac_commac_sg_ops;
2867 ndev->netdev_ops = &emac_netdev_ops;
2868 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2870 netif_carrier_off(ndev);
2871 netif_stop_queue(ndev);
2873 err = register_netdev(ndev);
2875 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2876 np->full_name, err);
2877 goto err_detach_tah;
2880 /* Set our drvdata last as we don't want it visible until we are fully initialized */
2884 dev_set_drvdata(&ofdev->dev, dev);
2886 /* There's a new kid in town! Let's tell everybody */
2887 wake_up_all(&emac_probe_wait);
2890 printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n",
2891 ndev->name, dev->cell_index, np->full_name, ndev->dev_addr);
2893 if (dev->phy_mode == PHY_MODE_SGMII)
2894 printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
2896 if (dev->phy.address >= 0)
2897 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2898 dev->phy.def->name, dev->phy.address);
2900 emac_dbg_register(dev);
2905 /* I have a bad feeling about this ... */
2908 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2909 tah_detach(dev->tah_dev, dev->tah_port);
2911 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2912 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2914 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2915 zmii_detach(dev->zmii_dev, dev->zmii_port);
2917 mal_unregister_commac(dev->mal, &dev->commac);
2921 iounmap(dev->emacp);
2923 if (dev->wol_irq != NO_IRQ)
2924 irq_dispose_mapping(dev->wol_irq);
2925 if (dev->emac_irq != NO_IRQ)
2926 irq_dispose_mapping(dev->emac_irq);
2930 /* if we were on the bootlist, remove us as we won't show up, and
2931 * wake up all waiters to notify them in case they were waiting on us */
2936 wake_up_all(&emac_probe_wait);
2941 static int __devexit emac_remove(struct of_device *ofdev)
2943 struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2945 DBG(dev, "remove" NL);
2947 dev_set_drvdata(&ofdev->dev, NULL);
2949 unregister_netdev(dev->ndev);
2951 flush_scheduled_work();
2953 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2954 tah_detach(dev->tah_dev, dev->tah_port);
2955 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2956 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2957 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2958 zmii_detach(dev->zmii_dev, dev->zmii_port);
2960 mal_unregister_commac(dev->mal, &dev->commac);
2963 emac_dbg_unregister(dev);
2964 iounmap(dev->emacp);
2966 if (dev->wol_irq != NO_IRQ)
2967 irq_dispose_mapping(dev->wol_irq);
2968 if (dev->emac_irq != NO_IRQ)
2969 irq_dispose_mapping(dev->emac_irq);
2976 /* XXX Features in here should be replaced by properties... */
2977 static struct of_device_id emac_match[] =
2981 .compatible = "ibm,emac",
2985 .compatible = "ibm,emac4",
2989 .compatible = "ibm,emac4sync",
2994 static struct of_platform_driver emac_driver = {
2996 .match_table = emac_match,
2998 .probe = emac_probe,
2999 .remove = emac_remove,
3002 static void __init emac_make_bootlist(void)
3004 struct device_node *np = NULL;
3005 int j, max, i = 0, k;
3006 int cell_indices[EMAC_BOOT_LIST_SIZE];
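/* Collect every EMAC-compatible node that isn't marked "unused",
 * remember its cell-index, then sort the list so EMACs probe in
 * cell-index order (which the EMAC_DEP_PREV_IDX dependency relies on).
 */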
3009 while((np = of_find_all_nodes(np)) != NULL) {
3012 if (of_match_node(emac_match, np) == NULL)
3014 if (of_get_property(np, "unused", NULL))
3016 idx = of_get_property(np, "cell-index", NULL);
3019 cell_indices[i] = *idx;
3020 emac_boot_list[i++] = of_node_get(np);
3021 if (i >= EMAC_BOOT_LIST_SIZE) {
3028 /* Bubble sort them (doh, what a creative algorithm :-) */
3029 for (i = 0; max > 1 && (i < (max - 1)); i++)
3030 for (j = i; j < max; j++) {
3031 if (cell_indices[i] > cell_indices[j]) {
3032 np = emac_boot_list[i];
3033 emac_boot_list[i] = emac_boot_list[j];
3034 emac_boot_list[j] = np;
3035 k = cell_indices[i];
3036 cell_indices[i] = cell_indices[j];
3037 cell_indices[j] = k;
3042 static int __init emac_init(void)
3046 printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
3048 /* Init debug stuff */
3051 /* Build EMAC boot list */
3052 emac_make_bootlist();
3054 /* Init submodules */
3067 rc = of_register_platform_driver(&emac_driver);
3085 static void __exit emac_exit(void)
3089 of_unregister_platform_driver(&emac_driver);
3097 /* Destroy EMAC boot list */
3098 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3099 if (emac_boot_list[i])
3100 of_node_put(emac_boot_list[i]);
3103 module_init(emac_init);
3104 module_exit(emac_exit);