/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers. I allocate memory
 * pages and then divide them into 2K frame buffers. This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small frames.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>

#include <asm/cacheflush.h>
static void set_multicast_list(struct net_device *ndev);

#if defined(CONFIG_ARM)
#define FEC_ALIGNMENT	0xf
#else
#define FEC_ALIGNMENT	0x3
#endif

#define DRIVER_NAME	"fec"
/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE	(1 << 5)
#define FEC_ENET_RSEM_V	0x84
#define FEC_ENET_RSFL_V	16
#define FEC_ENET_RAEM_V	0x8
#define FEC_ENET_RAFL_V	0x8
#define FEC_ENET_OPD_V	0xFFF0
/* Controller is ENET-MAC */
#define FEC_QUIRK_ENET_MAC		(1 << 0)
/* Controller needs driver to swap frame */
#define FEC_QUIRK_SWAP_FRAME		(1 << 1)
/* Controller uses gasket */
#define FEC_QUIRK_USE_GASKET		(1 << 2)
/* Controller has GBIT support */
#define FEC_QUIRK_HAS_GBIT		(1 << 3)
/* Controller has extended buffer descriptors */
#define FEC_QUIRK_HAS_BUFDESC_EX	(1 << 4)
/* Controller has hardware checksum support */
#define FEC_QUIRK_HAS_CSUM		(1 << 5)
/* Controller has hardware vlan support */
#define FEC_QUIRK_HAS_VLAN		(1 << 6)
/* ENET IP errata ERR006358
 *
 * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
 * detected as not set during a prior frame transmission, then the
 * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
 * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
 * frames not being transmitted until there is a 0-to-1 transition on
 * ENET_TDAR[TDAR].
 */
#define FEC_QUIRK_ERR006358		(1 << 7)
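/* The workaround used later in this driver (see fec_enet_tx()) is to re-arm
 * the transmitter after reclaiming descriptors whenever the ring still holds
 * unsent BDs but TDAR reads back as zero:
 *
 *	if (bdp != fep->cur_tx && readl(fep->hwp + FEC_X_DES_ACTIVE) == 0)
 *		writel(0, fep->hwp + FEC_X_DES_ACTIVE);
 *
 * which forces the 0-to-1 transition the errata text requires.
 */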
static struct platform_device_id fec_devtype[] = {
	{
		/* keep it for coldfire */
		.name = DRIVER_NAME,
		.driver_data = 0,
	}, {
		.name = "imx25-fec",
		.driver_data = FEC_QUIRK_USE_GASKET,
	}, {
		.name = "imx27-fec",
		.driver_data = 0,
	}, {
		.name = "imx28-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
	}, {
		.name = "imx6q-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358,
	}, {
		.name = "mvf600-fec",
		.driver_data = FEC_QUIRK_ENET_MAC,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, fec_devtype);
enum imx_fec_type {
	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
	IMX27_FEC,	/* runs on i.mx27/35/51 */
	IMX28_FEC,
	IMX6Q_FEC,
	MVF600_FEC,
};

static const struct of_device_id fec_dt_ids[] = {
	{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
	{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
	{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
	{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);
static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#if defined(CONFIG_M5272)
/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define FEC_FLASHMAC	0xf0020000
#elif defined(CONFIG_M5272C3)
#define FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define FEC_FLASHMAC	0xffc0406b
#else
#define FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */
/* Interrupt events/masks. */
#define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
#define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
#define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
#define FEC_ENET_GRA	((uint)0x10000000)	/* Graceful stop complete */
#define FEC_ENET_TXF	((uint)0x08000000)	/* Full frame transmitted */
#define FEC_ENET_TXB	((uint)0x04000000)	/* A buffer was transmitted */
#define FEC_ENET_RXF	((uint)0x02000000)	/* Full frame received */
#define FEC_ENET_RXB	((uint)0x01000000)	/* A buffer was received */
#define FEC_ENET_MII	((uint)0x00800000)	/* MII interrupt */
#define FEC_ENET_EBERR	((uint)0x00400000)	/* SDMA bus error */

#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE		1522
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1536
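/* For reference (an illustrative breakdown, not from the reference manual):
 * 1522 is the worst-case Ethernet frame with a VLAN tag, i.e.
 * 1500 (payload) + 14 (dest/src/type) + 4 (VLAN) + 4 (FCS) = 1522 bytes,
 * and PKT_MAXBLR_SIZE rounds that up to a 16-byte-aligned buffer length
 * (1536 = 96 * 16) for the FEC_R_BUFF_SIZE register.
 */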
/* FEC receive acceleration */
#define FEC_RACC_IPDIS		(1 << 1)
#define FEC_RACC_PRODIS		(1 << 2)
#define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)
/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
#define OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE	0
#endif
/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST		(1 << 30)
#define FEC_MMFR_OP_READ	(2 << 28)
#define FEC_MMFR_OP_WRITE	(1 << 28)
#define FEC_MMFR_PA(v)		(((v) & 0x1f) << 23)
#define FEC_MMFR_RA(v)		(((v) & 0x1f) << 18)
#define FEC_MMFR_TA		(2 << 16)
#define FEC_MMFR_DATA(v)	((v) & 0xffff)
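/* For example (values chosen for illustration), an MDIO read of register 2
 * on the PHY at address 1 is started by composing one management frame in
 * FEC_MII_DATA:
 *
 *	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | FEC_MMFR_PA(1) |
 *	       FEC_MMFR_RA(2) | FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
 *
 * and the result is extracted with FEC_MMFR_DATA(), exactly as
 * fec_enet_mdio_read() does below.
 */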
#define FEC_MII_TIMEOUT		30000 /* us */

/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)

#define FEC_PAUSE_FLAG_AUTONEG	0x1
#define FEC_PAUSE_FLAG_ENABLE	0x2

#define TSO_HEADER_SIZE		128
/* Max number of allowed TCP segments for software TSO */
#define FEC_MAX_TSO_SEGS	100
#define FEC_MAX_SKB_DESCS	(FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_dma) && \
	(addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
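/* Software TSO keeps all segment headers in one block (fep->tso_hdrs).
 * IS_TSO_HEADER() tells the reclaim path whether a completed buffer address
 * points into that block, in which case it must not be dma_unmap_single()'d:
 * the header area is mapped separately from per-packet buffers and stays
 * mapped for the life of the ring. FEC_MAX_SKB_DESCS budgets the worst case
 * of two descriptors (header + data) per segment plus the skb's fragments.
 */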
static int mii_cnt;

static inline
struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
{
	struct bufdesc *new_bd = bdp + 1;
	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
	struct bufdesc_ex *ex_base;
	struct bufdesc *base;
	int ring_size;

	if (bdp >= fep->tx_bd_base) {
		base = fep->tx_bd_base;
		ring_size = fep->tx_ring_size;
		ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
	} else {
		base = fep->rx_bd_base;
		ring_size = fep->rx_ring_size;
		ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
	}

	if (fep->bufdesc_ex)
		return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
			ex_base : ex_new_bd);
	else
		return (new_bd >= (base + ring_size)) ?
			base : new_bd;
}
static inline
struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
{
	struct bufdesc *new_bd = bdp - 1;
	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
	struct bufdesc_ex *ex_base;
	struct bufdesc *base;
	int ring_size;

	if (bdp >= fep->tx_bd_base) {
		base = fep->tx_bd_base;
		ring_size = fep->tx_ring_size;
		ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
	} else {
		base = fep->rx_bd_base;
		ring_size = fep->rx_ring_size;
		ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
	}

	if (fep->bufdesc_ex)
		return (struct bufdesc *)((ex_new_bd < ex_base) ?
			(ex_new_bd + ring_size) : ex_new_bd);
	else
		return (new_bd < base) ? (new_bd + ring_size) : new_bd;
}
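/* Both helpers treat the TX and RX rings as circular arrays of descriptors;
 * stepping past either end wraps around. The tx/rx discrimination relies on
 * the TX ring living at higher addresses than the RX ring
 * (bdp >= fep->tx_bd_base), which matches how the driver lays out the single
 * bd_dma allocation: the FEC_X_DES_START programming in fec_restart() below
 * places the TX descriptors immediately after rx_ring_size RX descriptors.
 */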
static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
				 struct fec_enet_private *fep)
{
	return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
}
static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep)
{
	int entries;

	entries = ((const char *)fep->dirty_tx -
			(const char *)fep->cur_tx) / fep->bufdesc_size - 1;

	return entries > 0 ? entries : entries + fep->tx_ring_size;
}
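/* Worked example (values chosen for illustration): with a 512-entry TX ring,
 * dirty_tx at index 5 and cur_tx at index 10, the difference yields
 * 5 - 10 - 1 = -6, which wraps to -6 + 512 = 506 free descriptors.
 * The "- 1" keeps one slot permanently unusable so that a completely full
 * ring is never mistaken for a completely empty one.
 */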
static void *swap_buffer(void *bufaddr, int len)
{
	int i;
	unsigned int *buf = bufaddr;

	for (i = 0; i < DIV_ROUND_UP(len, 4); i++, buf++)
		*buf = cpu_to_be32(*buf);

	return bufaddr;
}
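/* Controllers with FEC_QUIRK_SWAP_FRAME (the imx28 entry above) DMA frame
 * data as big-endian 32-bit words, so on a little-endian CPU each word must
 * be byte-swapped before transmit and after receive: the bytes
 * 0x11 0x22 0x33 0x44 become 0x44 0x33 0x22 0x11 in memory. On a big-endian
 * CPU cpu_to_be32() is a no-op and the loop leaves the buffer untouched.
 */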
static void fec_dump(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp = fep->tx_bd_base;
	unsigned int index = 0;

	netdev_info(ndev, "TX ring dump\n");
	pr_info("Nr     SC     addr       len  SKB\n");

	do {
		pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
			index,
			bdp == fep->cur_tx ? 'S' : ' ',
			bdp == fep->dirty_tx ? 'H' : ' ',
			bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
			fep->tx_skbuff[index]);
		bdp = fec_enet_get_nextdesc(bdp, fep);
		index++;
	} while (bdp != fep->tx_bd_base);
}
static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}
static int
fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
{
	/* Only run for packets requiring a checksum. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	if (is_ipv4_pkt(skb))
		ip_hdr(skb)->check = 0;
	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;

	return 0;
}
static int
fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct bufdesc *bdp = fep->cur_tx;
	struct bufdesc_ex *ebdp;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag, frag_len;
	unsigned short status;
	unsigned int estatus = 0;
	skb_frag_t *this_frag;
	unsigned int index;
	void *bufaddr;
	dma_addr_t addr;
	int i;

	for (frag = 0; frag < nr_frags; frag++) {
		this_frag = &skb_shinfo(skb)->frags[frag];
		bdp = fec_enet_get_nextdesc(bdp, fep);
		ebdp = (struct bufdesc_ex *)bdp;

		status = bdp->cbd_sc;
		status &= ~BD_ENET_TX_STATS;
		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
		frag_len = skb_shinfo(skb)->frags[frag].size;

		/* Handle the last BD specially */
		if (frag == nr_frags - 1) {
			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
			if (fep->bufdesc_ex) {
				estatus |= BD_ENET_TX_INT;
				if (unlikely(skb_shinfo(skb)->tx_flags &
					SKBTX_HW_TSTAMP && fep->hwts_tx_en))
					estatus |= BD_ENET_TX_TS;
			}
		}

		if (fep->bufdesc_ex) {
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
			ebdp->cbd_bdu = 0;
			ebdp->cbd_esc = estatus;
		}

		bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;

		index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
		if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
			id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
			memcpy(fep->tx_bounce[index], bufaddr, frag_len);
			bufaddr = fep->tx_bounce[index];

			if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
				swap_buffer(bufaddr, frag_len);
		}

		addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, addr)) {
			dev_kfree_skb_any(skb);
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			goto dma_mapping_error;
		}

		bdp->cbd_bufaddr = addr;
		bdp->cbd_datlen = frag_len;
		bdp->cbd_sc = status;
	}

	fep->cur_tx = bdp;

	return 0;

dma_mapping_error:
	bdp = fep->cur_tx;
	for (i = 0; i < frag; i++) {
		bdp = fec_enet_get_nextdesc(bdp, fep);
		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
				bdp->cbd_datlen, DMA_TO_DEVICE);
	}
	return NETDEV_TX_OK;
}
static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct bufdesc *bdp, *last_bdp;
	void *bufaddr;
	dma_addr_t addr;
	unsigned short status;
	unsigned short buflen;
	unsigned int estatus = 0;
	unsigned int index;
	int entries_free;
	int ret;

	entries_free = fec_enet_get_free_txdesc_num(fep);
	if (entries_free < MAX_SKB_FRAGS + 1) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for SG!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Fill in a Tx ring entry */
	bdp = fep->cur_tx;
	status = bdp->cbd_sc;
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	buflen = skb_headlen(skb);

	index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
	if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
		id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
		memcpy(fep->tx_bounce[index], skb->data, buflen);
		bufaddr = fep->tx_bounce[index];

		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, buflen);
	}

	/* Push the data cache so the CPM does not get stale memory data. */
	addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_OK;
	}

	if (nr_frags) {
		ret = fec_enet_txq_submit_frag_skb(skb, ndev);
		if (ret)
			return ret;
	} else {
		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
		if (fep->bufdesc_ex) {
			estatus = BD_ENET_TX_INT;
			if (unlikely(skb_shinfo(skb)->tx_flags &
				SKBTX_HW_TSTAMP && fep->hwts_tx_en))
				estatus |= BD_ENET_TX_TS;
		}
	}

	if (fep->bufdesc_ex) {

		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
			fep->hwts_tx_en))
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;

		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = estatus;
	}

	last_bdp = fep->cur_tx;
	index = fec_enet_get_bd_index(fep->tx_bd_base, last_bdp, fep);
	/* Save skb pointer */
	fep->tx_skbuff[index] = skb;

	bdp->cbd_datlen = buflen;
	bdp->cbd_bufaddr = addr;

	/* Send it on its way. Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
	bdp->cbd_sc = status;

	/* If this was the last BD in the ring, start at the beginning again. */
	bdp = fec_enet_get_nextdesc(last_bdp, fep);

	skb_tx_timestamp(skb);

	fep->cur_tx = bdp;

	/* Trigger transmission start */
	writel(0, fep->hwp + FEC_X_DES_ACTIVE);

	return 0;
}
static int
fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
			struct bufdesc *bdp, int index, char *data,
			int size, bool last_tcp, bool is_last)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
	unsigned short status;
	unsigned int estatus = 0;
	dma_addr_t addr;

	status = bdp->cbd_sc;
	status &= ~BD_ENET_TX_STATS;

	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	if (((unsigned long) data) & FEC_ALIGNMENT ||
		id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
		memcpy(fep->tx_bounce[index], data, size);
		data = fep->tx_bounce[index];

		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, size);
	}

	addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_BUSY;
	}

	bdp->cbd_datlen = size;
	bdp->cbd_bufaddr = addr;

	if (fep->bufdesc_ex) {
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = estatus;
	}

	/* Handle the last BD specially */
	if (last_tcp)
		status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
	if (is_last) {
		status |= BD_ENET_TX_INTR;
		if (fep->bufdesc_ex)
			ebdp->cbd_esc |= BD_ENET_TX_INT;
	}

	bdp->cbd_sc = status;

	return 0;
}
static int
fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
			struct bufdesc *bdp, int index)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
	void *bufaddr;
	unsigned long dmabuf;
	unsigned short status;
	unsigned int estatus = 0;

	status = bdp->cbd_sc;
	status &= ~BD_ENET_TX_STATS;
	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	bufaddr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
	dmabuf = fep->tso_hdrs_dma + index * TSO_HEADER_SIZE;
	if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
		id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
		memcpy(fep->tx_bounce[index], skb->data, hdr_len);
		bufaddr = fep->tx_bounce[index];

		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, hdr_len);

		dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
					hdr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
			dev_kfree_skb_any(skb);
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			return NETDEV_TX_BUSY;
		}
	}

	bdp->cbd_bufaddr = dmabuf;
	bdp->cbd_datlen = hdr_len;

	if (fep->bufdesc_ex) {
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = estatus;
	}

	bdp->cbd_sc = status;

	return 0;
}
static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total_len, data_left;
	struct bufdesc *bdp = fep->cur_tx;
	struct tso_t tso;
	unsigned int index = 0;
	int ret;

	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for TSO!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		ret = fec_enet_txq_put_hdr_tso(skb, ndev, bdp, index);
		if (ret)
			goto err_release;

		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);
			bdp = fec_enet_get_nextdesc(bdp, fep);
			index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
			ret = fec_enet_txq_put_data_tso(skb, ndev, bdp, index, tso.data,
							size, size == data_left,
							total_len == 0);
			if (ret)
				goto err_release;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}

		bdp = fec_enet_get_nextdesc(bdp, fep);
	}

	/* Save skb pointer */
	fep->tx_skbuff[index] = skb;

	skb_tx_timestamp(skb);
	fep->cur_tx = bdp;

	/* Trigger transmission start */
	writel(0, fep->hwp + FEC_X_DES_ACTIVE);

	return 0;

err_release:
	/* TODO: Release all used data descriptors for TSO */
	return ret;
}
static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int entries_free;
	int ret;

	if (skb_is_gso(skb))
		ret = fec_enet_txq_submit_tso(skb, ndev);
	else
		ret = fec_enet_txq_submit_skb(skb, ndev);
	if (ret)
		return ret;

	entries_free = fec_enet_get_free_txdesc_num(fep);
	if (entries_free <= fep->tx_stop_threshold)
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}
/* Init RX & TX buffer descriptors
 */
static void fec_enet_bd_init(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *bdp;
	unsigned int i;

	/* Initialize the receive buffer descriptors. */
	bdp = fep->rx_bd_base;
	for (i = 0; i < fep->rx_ring_size; i++) {

		/* Initialize the BD for every fragment in the page. */
		if (bdp->cbd_bufaddr)
			bdp->cbd_sc = BD_ENET_RX_EMPTY;
		else
			bdp->cbd_sc = 0;
		bdp = fec_enet_get_nextdesc(bdp, fep);
	}

	/* Set the last buffer to wrap */
	bdp = fec_enet_get_prevdesc(bdp, fep);
	bdp->cbd_sc |= BD_SC_WRAP;

	fep->cur_rx = fep->rx_bd_base;

	/* ...and the same for transmit */
	bdp = fep->tx_bd_base;
	fep->cur_tx = bdp;
	for (i = 0; i < fep->tx_ring_size; i++) {

		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = 0;
		if (fep->tx_skbuff[i]) {
			dev_kfree_skb_any(fep->tx_skbuff[i]);
			fep->tx_skbuff[i] = NULL;
		}
		bdp->cbd_bufaddr = 0;
		bdp = fec_enet_get_nextdesc(bdp, fep);
	}

	/* Set the last buffer to wrap */
	bdp = fec_enet_get_prevdesc(bdp, fep);
	bdp->cbd_sc |= BD_SC_WRAP;
	fep->dirty_tx = bdp;
}
/*
 * This function is called to start or restart the FEC during a link
 * change, transmit timeout, or to reconfigure the FEC. The network
 * packet processing for this device must be stopped before this call.
 */
static void
fec_restart(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	int i;
	u32 val;
	u32 temp_mac[2];
	u32 rcntl = OPT_FRAME_SIZE | 0x04;
	u32 ecntl = 0x2; /* ETHEREN */

	/* Whack a reset. We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);

	/*
	 * enet-mac reset will reset mac address registers too,
	 * so need to reconfigure it.
	 */
	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
		memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
		writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
		writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
	}

	/* Clear any outstanding interrupt. */
	writel(0xffc00000, fep->hwp + FEC_IEVENT);

	/* Set maximum receive buffer size. */
	writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);

	fec_enet_bd_init(ndev);

	/* Set receive and transmit descriptor base. */
	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
	if (fep->bufdesc_ex)
		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
			* fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
	else
		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
			* fep->rx_ring_size, fep->hwp + FEC_X_DES_START);

	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
		if (fep->tx_skbuff[i]) {
			dev_kfree_skb_any(fep->tx_skbuff[i]);
			fep->tx_skbuff[i] = NULL;
		}
	}

	/* Enable MII mode */
	if (fep->full_duplex == DUPLEX_FULL) {
		/* FD enable */
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* No Rcv on Xmit */
		rcntl |= 0x02;
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

#if !defined(CONFIG_M5272)
	/* set RX checksum */
	val = readl(fep->hwp + FEC_RACC);
	if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
		val |= FEC_RACC_OPTIONS;
	else
		val &= ~FEC_RACC_OPTIONS;
	writel(val, fep->hwp + FEC_RACC);
#endif

	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;

		/* RGMII, RMII or MII */
		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII)
			rcntl |= (1 << 6);
		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
			rcntl |= (1 << 8);
		else
			rcntl &= ~(1 << 8);

		/* 1G, 100M or 10M */
		if (fep->phy_dev) {
			if (fep->phy_dev->speed == SPEED_1000)
				ecntl |= (1 << 5);
			else if (fep->phy_dev->speed == SPEED_100)
				rcntl &= ~(1 << 9);
			else
				rcntl |= (1 << 9);
		}
	} else {
#ifdef FEC_MIIGSK_ENR
		if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) {
			u32 cfgr;
			/* disable the gasket and wait */
			writel(0, fep->hwp + FEC_MIIGSK_ENR);
			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
				udelay(1);

			/*
			 * configure the gasket:
			 *   RMII, 50 MHz, no loopback, no echo
			 *   MII, 25 MHz, no loopback, no echo
			 */
			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
			if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);

			/* re-enable the gasket */
			writel(2, fep->hwp + FEC_MIIGSK_ENR);
		}
#endif
	}

#if !defined(CONFIG_M5272)
	/* enable pause frame*/
	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
	     fep->phy_dev && fep->phy_dev->pause)) {
		rcntl |= FEC_ENET_FCE;

		/* set FIFO threshold parameter to reduce overrun */
		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
		writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);

		/* OPD */
		writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
	} else {
		rcntl &= ~FEC_ENET_FCE;
	}
#endif /* !defined(CONFIG_M5272) */

	writel(rcntl, fep->hwp + FEC_R_CNTRL);

	/* Setup multicast filter. */
	set_multicast_list(ndev);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
		/* enable ENET endian swap */
		ecntl |= (1 << 8);
		/* enable ENET store and forward mode */
		writel(1 << 8, fep->hwp + FEC_X_WMRK);
	}

	if (fep->bufdesc_ex)
		ecntl |= (1 << 4);

#ifndef CONFIG_M5272
	/* Enable the MIB statistic event counters */
	writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
#endif

	/* And last, enable the transmit and receive processing */
	writel(ecntl, fep->hwp + FEC_ECNTRL);
	writel(0, fep->hwp + FEC_R_DES_ACTIVE);

	if (fep->bufdesc_ex)
		fec_ptp_start_cyclecounter(ndev);

	/* Enable interrupts we wish to service */
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
}
static void
fec_stop(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			netdev_err(ndev, "Graceful transmit stop did not complete!\n");
	}

	/* Whack a reset. We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);

	/* We have to keep ENET enabled to have MII interrupt stay working */
	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
		writel(2, fep->hwp + FEC_ECNTRL);
		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
	}
}
static void
fec_timeout(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	fec_dump(ndev);

	ndev->stats.tx_errors++;

	schedule_work(&fep->tx_timeout_work);
}

static void fec_enet_timeout_work(struct work_struct *work)
{
	struct fec_enet_private *fep =
		container_of(work, struct fec_enet_private, tx_timeout_work);
	struct net_device *ndev = fep->netdev;

	rtnl_lock();
	if (netif_device_present(ndev) || netif_running(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		fec_restart(ndev);
		netif_wake_queue(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
	}
	rtnl_unlock();
}
static void
fec_enet_tx(struct net_device *ndev)
{
	struct fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	int index = 0;
	int entries_free;

	fep = netdev_priv(ndev);
	bdp = fep->dirty_tx;

	/* get next bdp of dirty_tx */
	bdp = fec_enet_get_nextdesc(bdp, fep);

	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {

		/* current queue is empty */
		if (bdp == fep->cur_tx)
			break;

		index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);

		skb = fep->tx_skbuff[index];
		fep->tx_skbuff[index] = NULL;
		if (!IS_TSO_HEADER(fep, bdp->cbd_bufaddr))
			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
					bdp->cbd_datlen, DMA_TO_DEVICE);
		bdp->cbd_bufaddr = 0;
		if (!skb) {
			bdp = fec_enet_get_nextdesc(bdp, fep);
			continue;
		}

		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			ndev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				ndev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				ndev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				ndev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				ndev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				ndev->stats.tx_carrier_errors++;
		} else {
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skb->len;
		}

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
			fep->bufdesc_ex) {
			struct skb_shared_hwtstamps shhwtstamps;
			unsigned long flags;
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			spin_lock_irqsave(&fep->tmreg_lock, flags);
			shhwtstamps.hwtstamp = ns_to_ktime(
				timecounter_cyc2time(&fep->tc, ebdp->ts));
			spin_unlock_irqrestore(&fep->tmreg_lock, flags);
			skb_tstamp_tx(skb, &shhwtstamps);
		}

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);

		fep->dirty_tx = bdp;

		/* Update pointer to next buffer descriptor to be transmitted */
		bdp = fec_enet_get_nextdesc(bdp, fep);

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (netif_queue_stopped(ndev)) {
			entries_free = fec_enet_get_free_txdesc_num(fep);
			if (entries_free >= fep->tx_wake_threshold)
				netif_wake_queue(ndev);
		}
	}

	/* ERR006538: Keep the transmitter going */
	if (bdp != fep->cur_tx && readl(fep->hwp + FEC_X_DES_ACTIVE) == 0)
		writel(0, fep->hwp + FEC_X_DES_ACTIVE);
}
/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
fec_enet_rx(struct net_device *ndev, int budget)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	ushort pkt_len;
	__u8 *data;
	int pkt_received = 0;
	struct bufdesc_ex *ebdp = NULL;
	bool vlan_packet_rcvd = false;
	u16 vlan_tag;
	int index = 0;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

		if (pkt_received >= budget)
			break;
		pkt_received++;

		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			netdev_err(ndev, "rcv is not +last\n");

		writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);

		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			ndev->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				ndev->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				ndev->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				ndev->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer. So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			ndev->stats.rx_errors++;
			ndev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		ndev->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		ndev->stats.rx_bytes += pkt_len;

		index = fec_enet_get_bd_index(fep->rx_bd_base, bdp, fep);
		data = fep->rx_skbuff[index]->data;
		dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
					FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);

		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, pkt_len);

		/* Extract the enhanced buffer descriptor */
		ebdp = NULL;
		if (fep->bufdesc_ex)
			ebdp = (struct bufdesc_ex *)bdp;

		/* If this is a VLAN packet remove the VLAN Tag */
		vlan_packet_rcvd = false;
		if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
			/* Push and remove the vlan tag */
			struct vlan_hdr *vlan_header =
					(struct vlan_hdr *) (data + ETH_HLEN);
			vlan_tag = ntohs(vlan_header->h_vlan_TCI);
			pkt_len -= VLAN_HLEN;

			vlan_packet_rcvd = true;
		}

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN);

		if (unlikely(!skb)) {
			ndev->stats.rx_dropped++;
		} else {
			int payload_offset = (2 * ETH_ALEN);
			skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len - 4);	/* Make room */

			/* Extract the frame data without the VLAN header. */
			skb_copy_to_linear_data(skb, data, (2 * ETH_ALEN));
			if (vlan_packet_rcvd)
				payload_offset = (2 * ETH_ALEN) + VLAN_HLEN;
			skb_copy_to_linear_data_offset(skb, (2 * ETH_ALEN),
						       data + payload_offset,
						       pkt_len - 4 - (2 * ETH_ALEN));
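			/* Copy layout (illustrative summary): the 12
			 * dest/src bytes are copied first; if a VLAN tag was
			 * present, the 4-byte tag is skipped by starting the
			 * second copy at data + 16 instead of data + 12, so
			 * the skb holds dst|src|type|payload with the tag
			 * delivered out of band via the hwaccel path below.
			 */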
			skb->protocol = eth_type_trans(skb, ndev);

			/* Get receive timestamp from the skb */
			if (fep->hwts_rx_en && fep->bufdesc_ex) {
				struct skb_shared_hwtstamps *shhwtstamps =
							    skb_hwtstamps(skb);
				unsigned long flags;

				memset(shhwtstamps, 0, sizeof(*shhwtstamps));

				spin_lock_irqsave(&fep->tmreg_lock, flags);
				shhwtstamps->hwtstamp = ns_to_ktime(
				    timecounter_cyc2time(&fep->tc, ebdp->ts));
				spin_unlock_irqrestore(&fep->tmreg_lock, flags);
			}

			if (fep->bufdesc_ex &&
			    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
				if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
					/* don't check it */
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				} else {
					skb_checksum_none_assert(skb);
				}
			}

			/* Handle received VLAN packets */
			if (vlan_packet_rcvd)
				__vlan_hwaccel_put_tag(skb,
						       htons(ETH_P_8021Q),
						       vlan_tag);

			napi_gro_receive(&fep->napi, skb);
		}

		dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
					   FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			ebdp->cbd_esc = BD_ENET_RX_INT;
			ebdp->cbd_prot = 0;
			ebdp->cbd_bdu = 0;
		}

		/* Update BD pointer to next entry */
		bdp = fec_enet_get_nextdesc(bdp, fep);

		/* Doing this here will keep the FEC running while we process
		 * incoming frames. On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, fep->hwp + FEC_R_DES_ACTIVE);
	}
	fep->cur_rx = bdp;
	return pkt_received;
}
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	const unsigned napi_mask = FEC_ENET_RXF | FEC_ENET_TXF;
	uint int_events;
	irqreturn_t ret = IRQ_NONE;

	int_events = readl(fep->hwp + FEC_IEVENT);
	writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT);

	if (int_events & napi_mask) {
		ret = IRQ_HANDLED;

		/* Disable the NAPI interrupts */
		writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
		napi_schedule(&fep->napi);
	}

	if (int_events & FEC_ENET_MII) {
		ret = IRQ_HANDLED;
		complete(&fep->mdio_done);
	}

	return ret;
}
static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct fec_enet_private *fep = netdev_priv(ndev);
	int pkts;

	/*
	 * Clear any pending transmit or receive interrupts before
	 * processing the rings to avoid racing with the hardware.
	 */
	writel(FEC_ENET_RXF | FEC_ENET_TXF, fep->hwp + FEC_IEVENT);

	pkts = fec_enet_rx(ndev, budget);

	fec_enet_tx(ndev);

	if (pkts < budget) {
		napi_complete(napi);
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	}
	return pkts;
}
/* ------------------------------------------------------------------------- */
static void fec_get_mac(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
	unsigned char *iap, tmpaddr[ETH_ALEN];

	/*
	 * try to get mac address in following order:
	 *
	 * 1) module parameter via kernel command line in form
	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
	 */
	iap = macaddr;

	/*
	 * 2) from device tree data
	 */
	if (!is_valid_ether_addr(iap)) {
		struct device_node *np = fep->pdev->dev.of_node;
		if (np) {
			const char *mac = of_get_mac_address(np);
			if (mac)
				iap = (unsigned char *) mac;
		}
	}

	/*
	 * 3) from flash or fuse (via platform data)
	 */
	if (!is_valid_ether_addr(iap)) {
#ifdef CONFIG_M5272
		if (FEC_FLASHMAC)
			iap = (unsigned char *)FEC_FLASHMAC;
#else
		if (pdata)
			iap = (unsigned char *)&pdata->mac;
#endif
	}

	/*
	 * 4) FEC mac registers set by bootloader
	 */
	if (!is_valid_ether_addr(iap)) {
		*((__be32 *) &tmpaddr[0]) =
			cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
		*((__be16 *) &tmpaddr[4]) =
			cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
		iap = &tmpaddr[0];
	}

	/*
	 * 5) random mac address
	 */
	if (!is_valid_ether_addr(iap)) {
		/* Report it and use a random ethernet address instead */
		netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
		eth_hw_addr_random(ndev);
		netdev_info(ndev, "Using random MAC address: %pM\n",
			    ndev->dev_addr);
		return;
	}

	memcpy(ndev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using macaddr */
	if (iap == macaddr)
		ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
}
1476 /* ------------------------------------------------------------------------- */
1481 static void fec_enet_adjust_link(struct net_device *ndev)
1483 struct fec_enet_private *fep = netdev_priv(ndev);
1484 struct phy_device *phy_dev = fep->phy_dev;
1485 int status_change = 0;
1487 /* Prevent a state halted on mii error */
1488 if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
1489 phy_dev->state = PHY_RESUMING;
1494 * If the netdev is down, or is going down, we're not interested
1495 * in link state events, so just mark our idea of the link as down
1496 * and ignore the event.
1498 if (!netif_running(ndev) || !netif_device_present(ndev)) {
1500 } else if (phy_dev->link) {
1502 fep->link = phy_dev->link;
1506 if (fep->full_duplex != phy_dev->duplex) {
1507 fep->full_duplex = phy_dev->duplex;
1511 if (phy_dev->speed != fep->speed) {
1512 fep->speed = phy_dev->speed;
1516 /* if any of the above changed restart the FEC */
1517 if (status_change) {
1518 napi_disable(&fep->napi);
1519 netif_tx_lock_bh(ndev);
1521 netif_wake_queue(ndev);
1522 netif_tx_unlock_bh(ndev);
1523 napi_enable(&fep->napi);
1527 napi_disable(&fep->napi);
1528 netif_tx_lock_bh(ndev);
1530 netif_tx_unlock_bh(ndev);
1531 napi_enable(&fep->napi);
1532 fep->link = phy_dev->link;
1538 phy_print_status(phy_dev);
static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct fec_enet_private *fep = bus->priv;
	unsigned long time_left;

	fep->mii_timeout = 0;
	init_completion(&fep->mdio_done);

	/* start a read op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		netdev_err(fep->netdev, "MDIO read timeout\n");
		return -ETIMEDOUT;
	}

	/* return value */
	return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
}
static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct fec_enet_private *fep = bus->priv;
	unsigned long time_left;

	fep->mii_timeout = 0;
	init_completion(&fep->mdio_done);

	/* start a write op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA | FEC_MMFR_DATA(value),
		fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		netdev_err(fep->netdev, "MDIO write timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	if (enable) {
		ret = clk_prepare_enable(fep->clk_ahb);
		if (ret)
			return ret;
		ret = clk_prepare_enable(fep->clk_ipg);
		if (ret)
			goto failed_clk_ipg;
		if (fep->clk_enet_out) {
			ret = clk_prepare_enable(fep->clk_enet_out);
			if (ret)
				goto failed_clk_enet_out;
		}
		if (fep->clk_ptp) {
			ret = clk_prepare_enable(fep->clk_ptp);
			if (ret)
				goto failed_clk_ptp;
		}
	} else {
		clk_disable_unprepare(fep->clk_ahb);
		clk_disable_unprepare(fep->clk_ipg);
		if (fep->clk_enet_out)
			clk_disable_unprepare(fep->clk_enet_out);
		if (fep->clk_ptp)
			clk_disable_unprepare(fep->clk_ptp);
	}

	return 0;

failed_clk_ptp:
	if (fep->clk_enet_out)
		clk_disable_unprepare(fep->clk_enet_out);
failed_clk_enet_out:
	clk_disable_unprepare(fep->clk_ipg);
failed_clk_ipg:
	clk_disable_unprepare(fep->clk_ahb);

	return ret;
}
static int fec_enet_mii_probe(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct phy_device *phy_dev = NULL;
	char mdio_bus_id[MII_BUS_ID_SIZE];
	char phy_name[MII_BUS_ID_SIZE + 3];
	int phy_id;
	int dev_id = fep->dev_id;

	fep->phy_dev = NULL;

	/* check for attached phy */
	for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
		if ((fep->mii_bus->phy_mask & (1 << phy_id)))
			continue;
		if (fep->mii_bus->phy_map[phy_id] == NULL)
			continue;
		if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
			continue;
		if (dev_id--)
			continue;
		strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
		break;
	}

	if (phy_id >= PHY_MAX_ADDR) {
		netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
		strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
		phy_id = 0;
	}

	snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT, mdio_bus_id, phy_id);
	phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
			      fep->phy_interface);
	if (IS_ERR(phy_dev)) {
		netdev_err(ndev, "could not attach to PHY\n");
		return PTR_ERR(phy_dev);
	}

	/* mask with MAC supported features */
	if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
		phy_dev->supported &= PHY_GBIT_FEATURES;
		phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
#if !defined(CONFIG_M5272)
		phy_dev->supported |= SUPPORTED_Pause;
#endif
	}
	else
		phy_dev->supported &= PHY_BASIC_FEATURES;

	phy_dev->advertising = phy_dev->supported;

	fep->phy_dev = phy_dev;
	fep->link = 0;
	fep->full_duplex = 0;

	netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
		    fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
		    fep->phy_dev->irq);

	return 0;
}
static int fec_enet_mii_init(struct platform_device *pdev)
{
	static struct mii_bus *fec0_mii_bus;
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	int err = -ENXIO, i;

	/*
	 * The dual fec interfaces are not equivalent with enet-mac.
	 * Here are the differences:
	 *
	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
	 *  - fec0 acts as the 1588 time master while fec1 is slave
	 *  - external phys can only be configured by fec0
	 *
	 * That is to say fec1 can not work independently. It only works
	 * when fec0 is working. The reason behind this design is that the
	 * second interface is added primarily for Switch mode.
	 *
	 * Because of the last point above, both phys are attached on fec0
	 * mdio interface in board design, and need to be configured by
	 * fec0 mii_bus.
	 */
	if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
		/* fec1 uses fec0 mii_bus */
		if (mii_cnt && fec0_mii_bus) {
			fep->mii_bus = fec0_mii_bus;
			mii_cnt++;
			return 0;
		}
		return -ENOENT;
	}

	fep->mii_timeout = 0;

	/*
	 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
	 *
	 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
	 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28
	 * Reference Manual has an error on this, and gets fixed on i.MX6Q
	 * document ENET_MDC_CLOCK.
	 */
	fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
		fep->phy_speed--;
	fep->phy_speed <<= 1;
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
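	/* Worked example (clock rate chosen for illustration): with a 66 MHz
	 * ipg clock on an ENET-MAC, phy_speed becomes
	 * (DIV_ROUND_UP(66000000, 5000000) - 1) << 1 = (14 - 1) << 1 = 26,
	 * i.e. MII_SPEED = 13, giving MDC = 66 MHz / ((13 + 1) * 2)
	 * ~= 2.36 MHz, safely below the 2.5 MHz limit.
	 */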
	fep->mii_bus = mdiobus_alloc();
	if (fep->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	fep->mii_bus->name = "fec_enet_mii_bus";
	fep->mii_bus->read = fec_enet_mdio_read;
	fep->mii_bus->write = fec_enet_mdio_write;
	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		pdev->name, fep->dev_id + 1);
	fep->mii_bus->priv = fep;
	fep->mii_bus->parent = &pdev->dev;

	fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!fep->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out_free_mdiobus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		fep->mii_bus->irq[i] = PHY_POLL;

	if (mdiobus_register(fep->mii_bus))
		goto err_out_free_mdio_irq;

	mii_cnt++;

	/* save fec0 mii_bus */
	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
		fec0_mii_bus = fep->mii_bus;

	return 0;

err_out_free_mdio_irq:
	kfree(fep->mii_bus->irq);
err_out_free_mdiobus:
	mdiobus_free(fep->mii_bus);
err_out:
	return err;
}
static void fec_enet_mii_remove(struct fec_enet_private *fep)
{
	if (--mii_cnt == 0) {
		mdiobus_unregister(fep->mii_bus);
		kfree(fep->mii_bus->irq);
		mdiobus_free(fep->mii_bus);
	}
}
static int fec_enet_get_settings(struct net_device *ndev,
				 struct ethtool_cmd *cmd)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phydev = fep->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_gset(phydev, cmd);
}

static int fec_enet_set_settings(struct net_device *ndev,
				 struct ethtool_cmd *cmd)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phydev = fep->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_sset(phydev, cmd);
}
static void fec_enet_get_drvinfo(struct net_device *ndev,
				 struct ethtool_drvinfo *info)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	strlcpy(info->driver, fep->pdev->dev.driver->name,
		sizeof(info->driver));
	strlcpy(info->version, "Revision: 1.0", sizeof(info->version));
	strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
}
static int fec_enet_get_ts_info(struct net_device *ndev,
				struct ethtool_ts_info *info)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (fep->bufdesc_ex) {

		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
					SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_SOFTWARE |
					SOF_TIMESTAMPING_TX_HARDWARE |
					SOF_TIMESTAMPING_RX_HARDWARE |
					SOF_TIMESTAMPING_RAW_HARDWARE;
		if (fep->ptp_clock)
			info->phc_index = ptp_clock_index(fep->ptp_clock);
		else
			info->phc_index = -1;

		info->tx_types = (1 << HWTSTAMP_TX_OFF) |
				 (1 << HWTSTAMP_TX_ON);

		info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
				   (1 << HWTSTAMP_FILTER_ALL);
		return 0;
	} else {
		return ethtool_op_get_ts_info(ndev, info);
	}
}
#if !defined(CONFIG_M5272)

static void fec_enet_get_pauseparam(struct net_device *ndev,
				    struct ethtool_pauseparam *pause)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
	pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
	pause->rx_pause = pause->tx_pause;
}
static int fec_enet_set_pauseparam(struct net_device *ndev,
				   struct ethtool_pauseparam *pause)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!fep->phy_dev)
		return -ENODEV;

	if (pause->tx_pause != pause->rx_pause) {
		netdev_info(ndev,
			"hardware only supports enabling/disabling tx and rx together");
		return -EINVAL;
	}

	fep->pause_flag = 0;

	/* tx pause must be same as rx pause */
	fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
	fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;

	if (pause->rx_pause || pause->autoneg) {
		fep->phy_dev->supported |= ADVERTISED_Pause;
		fep->phy_dev->advertising |= ADVERTISED_Pause;
	} else {
		fep->phy_dev->supported &= ~ADVERTISED_Pause;
		fep->phy_dev->advertising &= ~ADVERTISED_Pause;
	}

	if (pause->autoneg) {
		if (netif_running(ndev))
			fec_stop(ndev);
		phy_start_aneg(fep->phy_dev);
	}
	if (netif_running(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		fec_restart(ndev);
		netif_wake_queue(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
	}

	return 0;
}
static const struct fec_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} fec_stats[] = {
	/* RMON TX */
	{ "tx_dropped", RMON_T_DROP },
	{ "tx_packets", RMON_T_PACKETS },
	{ "tx_broadcast", RMON_T_BC_PKT },
	{ "tx_multicast", RMON_T_MC_PKT },
	{ "tx_crc_errors", RMON_T_CRC_ALIGN },
	{ "tx_undersize", RMON_T_UNDERSIZE },
	{ "tx_oversize", RMON_T_OVERSIZE },
	{ "tx_fragment", RMON_T_FRAG },
	{ "tx_jabber", RMON_T_JAB },
	{ "tx_collision", RMON_T_COL },
	{ "tx_64byte", RMON_T_P64 },
	{ "tx_65to127byte", RMON_T_P65TO127 },
	{ "tx_128to255byte", RMON_T_P128TO255 },
	{ "tx_256to511byte", RMON_T_P256TO511 },
	{ "tx_512to1023byte", RMON_T_P512TO1023 },
	{ "tx_1024to2047byte", RMON_T_P1024TO2047 },
	{ "tx_GTE2048byte", RMON_T_P_GTE2048 },
	{ "tx_octets", RMON_T_OCTETS },

	/* IEEE TX */
	{ "IEEE_tx_drop", IEEE_T_DROP },
	{ "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
	{ "IEEE_tx_1col", IEEE_T_1COL },
	{ "IEEE_tx_mcol", IEEE_T_MCOL },
	{ "IEEE_tx_def", IEEE_T_DEF },
	{ "IEEE_tx_lcol", IEEE_T_LCOL },
	{ "IEEE_tx_excol", IEEE_T_EXCOL },
	{ "IEEE_tx_macerr", IEEE_T_MACERR },
	{ "IEEE_tx_cserr", IEEE_T_CSERR },
	{ "IEEE_tx_sqe", IEEE_T_SQE },
	{ "IEEE_tx_fdxfc", IEEE_T_FDXFC },
	{ "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },

	/* RMON RX */
	{ "rx_packets", RMON_R_PACKETS },
	{ "rx_broadcast", RMON_R_BC_PKT },
	{ "rx_multicast", RMON_R_MC_PKT },
	{ "rx_crc_errors", RMON_R_CRC_ALIGN },
	{ "rx_undersize", RMON_R_UNDERSIZE },
	{ "rx_oversize", RMON_R_OVERSIZE },
	{ "rx_fragment", RMON_R_FRAG },
	{ "rx_jabber", RMON_R_JAB },
	{ "rx_64byte", RMON_R_P64 },
	{ "rx_65to127byte", RMON_R_P65TO127 },
	{ "rx_128to255byte", RMON_R_P128TO255 },
	{ "rx_256to511byte", RMON_R_P256TO511 },
	{ "rx_512to1023byte", RMON_R_P512TO1023 },
	{ "rx_1024to2047byte", RMON_R_P1024TO2047 },
	{ "rx_GTE2048byte", RMON_R_P_GTE2048 },
	{ "rx_octets", RMON_R_OCTETS },

	/* IEEE RX */
	{ "IEEE_rx_drop", IEEE_R_DROP },
	{ "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
	{ "IEEE_rx_crc", IEEE_R_CRC },
	{ "IEEE_rx_align", IEEE_R_ALIGN },
	{ "IEEE_rx_macerr", IEEE_R_MACERR },
	{ "IEEE_rx_fdxfc", IEEE_R_FDXFC },
	{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
};
static void fec_enet_get_ethtool_stats(struct net_device *dev,
	struct ethtool_stats *stats, u64 *data)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
		data[i] = readl(fep->hwp + fec_stats[i].offset);
}
static void fec_enet_get_strings(struct net_device *netdev,
	u32 stringset, u8 *data)
{
	int i;
	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
				fec_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static int fec_enet_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(fec_stats);
	default:
		return -EOPNOTSUPP;
	}
}
#endif /* !defined(CONFIG_M5272) */
static int fec_enet_nway_reset(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev = fep->phy_dev;

	if (!phydev)
		return -ENODEV;

	return genphy_restart_aneg(phydev);
}
static const struct ethtool_ops fec_enet_ethtool_ops = {
	.get_settings		= fec_enet_get_settings,
	.set_settings		= fec_enet_set_settings,
	.get_drvinfo		= fec_enet_get_drvinfo,
	.nway_reset		= fec_enet_nway_reset,
	.get_link		= ethtool_op_get_link,
#ifndef CONFIG_M5272
	.get_pauseparam		= fec_enet_get_pauseparam,
	.set_pauseparam		= fec_enet_set_pauseparam,
	.get_strings		= fec_enet_get_strings,
	.get_ethtool_stats	= fec_enet_get_ethtool_stats,
	.get_sset_count		= fec_enet_get_sset_count,
#endif
	.get_ts_info		= fec_enet_get_ts_info,
};
static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phydev = fep->phy_dev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	if (fep->bufdesc_ex) {
		if (cmd == SIOCSHWTSTAMP)
			return fec_ptp_set(ndev, rq);
		if (cmd == SIOCGHWTSTAMP)
			return fec_ptp_get(ndev, rq);
	}

	return phy_mii_ioctl(phydev, rq, cmd);
}
static void fec_enet_free_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct sk_buff *skb;
	struct bufdesc *bdp;

	bdp = fep->rx_bd_base;
	for (i = 0; i < fep->rx_ring_size; i++) {
		skb = fep->rx_skbuff[i];
		fep->rx_skbuff[i] = NULL;
		if (skb) {
			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
					FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
		}
		bdp = fec_enet_get_nextdesc(bdp, fep);
	}

	bdp = fep->tx_bd_base;
	for (i = 0; i < fep->tx_ring_size; i++) {
		kfree(fep->tx_bounce[i]);
		fep->tx_bounce[i] = NULL;
		skb = fep->tx_skbuff[i];
		fep->tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}
}
static int fec_enet_alloc_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct sk_buff *skb;
	struct bufdesc *bdp;

	bdp = fep->rx_bd_base;
	for (i = 0; i < fep->rx_ring_size; i++) {
		dma_addr_t addr;

		skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
		if (!skb)
			goto err_alloc;

		addr = dma_map_single(&fep->pdev->dev, skb->data,
				FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, addr)) {
			dev_kfree_skb(skb);
			if (net_ratelimit())
				netdev_err(ndev, "Rx DMA memory map failed\n");
			goto err_alloc;
		}

		fep->rx_skbuff[i] = skb;
		bdp->cbd_bufaddr = addr;
		bdp->cbd_sc = BD_ENET_RX_EMPTY;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
			ebdp->cbd_esc = BD_ENET_RX_INT;
		}

		bdp = fec_enet_get_nextdesc(bdp, fep);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, fep);
	bdp->cbd_sc |= BD_SC_WRAP;

	bdp = fep->tx_bd_base;
	for (i = 0; i < fep->tx_ring_size; i++) {
		fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
		if (!fep->tx_bounce[i])
			goto err_alloc;

		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
			ebdp->cbd_esc = BD_ENET_TX_INT;
		}

		bdp = fec_enet_get_nextdesc(bdp, fep);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, fep);
	bdp->cbd_sc |= BD_SC_WRAP;

	return 0;

err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
}
static int
fec_enet_open(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	pinctrl_pm_select_default_state(&fep->pdev->dev);
	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
		return ret;

	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */

	ret = fec_enet_alloc_buffers(ndev);
	if (ret)
		return ret;

	/* Probe and connect to PHY when open the interface */
	ret = fec_enet_mii_probe(ndev);
	if (ret) {
		fec_enet_free_buffers(ndev);
		return ret;
	}

	fec_restart(ndev);
	napi_enable(&fep->napi);
	phy_start(fep->phy_dev);
	netif_start_queue(ndev);
	return 0;
}
static int
fec_enet_close(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	phy_stop(fep->phy_dev);

	if (netif_device_present(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_disable(ndev);
		fec_stop(ndev);
	}

	phy_disconnect(fep->phy_dev);
	fep->phy_dev = NULL;

	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	fec_enet_free_buffers(ndev);

	return 0;
}
2226 /* Set or clear the multicast filter for this adaptor.
2227 * Skeleton taken from sunlance driver.
2228 * The CPM Ethernet implementation allows Multicast as well as individual
2229 * MAC address filtering. Some of the drivers check to make sure it is
2230 * a group multicast address, and discard those that are not. I guess I
2231 * will do the same for now, but just remove the test if you want
2232 * individual filtering as well (do the upper net layers want or support
2233 * this kind of feature?).
2236 #define HASH_BITS 6 /* #bits in hash */
2237 #define CRC32_POLY 0xEDB88320
static void set_multicast_list(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	unsigned int i, bit, data, crc, tmp;
	unsigned char hash;

	if (ndev->flags & IFF_PROMISC) {
		tmp = readl(fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(tmp, fep->hwp + FEC_R_CNTRL);
		return;
	}

	tmp = readl(fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(tmp, fep->hwp + FEC_R_CNTRL);

	if (ndev->flags & IFF_ALLMULTI) {
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
		return;
	}

	/* Clear filter and add the addresses in hash register */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

	netdev_for_each_mc_addr(ha, ndev) {
		/* calculate crc32 value of mac address */
		crc = 0xffffffff;

		for (i = 0; i < ndev->addr_len; i++) {
			data = ha->addr[i];
			for (bit = 0; bit < 8; bit++, data >>= 1) {
				crc = (crc >> 1) ^
				(((crc ^ data) & 1) ? CRC32_POLY : 0);
			}
		}

		/* only upper 6 bits (HASH_BITS) are used
		 * which point to a specific bit in the hash registers
		 */
		hash = (crc >> (32 - HASH_BITS)) & 0x3f;

		if (hash > 31) {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
			tmp |= 1 << (hash - 32);
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		} else {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
			tmp |= 1 << hash;
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
		}
	}
}
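
/* Worked example (values hypothetical): a multicast MAC whose CRC32 yields
 * hash = 43 (0x2b) falls in GRP_HASH_TABLE_HIGH, since 43 > 31, and sets
 * bit 43 - 32 = 11 there; hash = 10 would set bit 10 of GRP_HASH_TABLE_LOW
 * instead.  The controller applies the same hash to incoming destination
 * addresses and accepts a frame when the indexed bit is set.
 */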
/* Set a MAC change in hardware. */
static int
fec_set_mac_address(struct net_device *ndev, void *p)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct sockaddr *addr = p;

	if (addr) {
		if (!is_valid_ether_addr(addr->sa_data))
			return -EADDRNOTAVAIL;
		memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	}

	writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
		(ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
		fep->hwp + FEC_ADDR_LOW);
	writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
		fep->hwp + FEC_ADDR_HIGH);
	return 0;
}
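
/* Byte-to-register mapping, using the hypothetical address 00:04:9f:01:02:03:
 * FEC_ADDR_LOW is written with 0x00049f01 (dev_addr[0] in the top byte) and
 * FEC_ADDR_HIGH with 0x02030000 (dev_addr[4..5] in the upper half).
 */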
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * fec_poll_controller - FEC Poll controller function
 * @dev: The FEC network adapter
 *
 * Polled functionality used by netconsole and others in non-interrupt mode
 */
static void fec_poll_controller(struct net_device *dev)
{
	int i;
	struct fec_enet_private *fep = netdev_priv(dev);

	for (i = 0; i < FEC_IRQ_NUM; i++) {
		if (fep->irq[i] > 0) {
			disable_irq(fep->irq[i]);
			fec_enet_interrupt(fep->irq[i], dev);
			enable_irq(fep->irq[i]);
		}
	}
}
#endif
#define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM

static int fec_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	/* Quiesce the device if necessary */
	if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(netdev);
		fec_stop(netdev);
	}

	netdev->features = features;

	/* Receive checksum has been changed */
	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM)
			fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
		else
			fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
	}

	/* Resume the device after updates */
	if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
		fec_restart(netdev);
		netif_wake_queue(netdev);
		netif_tx_unlock_bh(netdev);
		napi_enable(&fep->napi);
	}

	return 0;
}
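
/* This callback is reached via netdev_update_features() when userspace
 * toggles an offload, e.g. "ethtool -K eth0 rx off".  Only NETIF_F_RXCSUM
 * needs the stop/restart bracket above, since FLAG_RX_CSUM_ENABLED is only
 * applied to the hardware when fec_restart() reprograms the receive path.
 */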
static const struct net_device_ops fec_netdev_ops = {
	.ndo_open		= fec_enet_open,
	.ndo_stop		= fec_enet_close,
	.ndo_start_xmit		= fec_enet_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= fec_timeout,
	.ndo_set_mac_address	= fec_set_mac_address,
	.ndo_do_ioctl		= fec_enet_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= fec_poll_controller,
#endif
	.ndo_set_features	= fec_set_features,
};
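
/* Rough mapping to userspace actions: ndo_open/ndo_stop back "ip link set
 * ethX up/down", ndo_set_rx_mode runs when the multicast list or the
 * IFF_PROMISC/IFF_ALLMULTI flags change, ndo_tx_timeout fires when the
 * watchdog (watchdog_timeo, set below) expires, and ndo_set_features backs
 * "ethtool -K".
 */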
/*
 * XXX: We need to clean up on failure exits here.
 */
static int fec_enet_init(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct bufdesc *cbd_base;
	int bd_size;

	/* init the tx & rx ring size */
	fep->tx_ring_size = TX_RING_SIZE;
	fep->rx_ring_size = RX_RING_SIZE;

	fep->tx_stop_threshold = FEC_MAX_SKB_DESCS;
	fep->tx_wake_threshold = (fep->tx_ring_size - fep->tx_stop_threshold) / 2;

	if (fep->bufdesc_ex)
		fep->bufdesc_size = sizeof(struct bufdesc_ex);
	else
		fep->bufdesc_size = sizeof(struct bufdesc);
	bd_size = (fep->tx_ring_size + fep->rx_ring_size) *
			fep->bufdesc_size;

	/* Allocate memory for buffer descriptors. */
	cbd_base = dma_alloc_coherent(NULL, bd_size, &fep->bd_dma,
				      GFP_KERNEL);
	if (!cbd_base)
		return -ENOMEM;

	fep->tso_hdrs = dma_alloc_coherent(NULL, fep->tx_ring_size * TSO_HEADER_SIZE,
					   &fep->tso_hdrs_dma, GFP_KERNEL);
	if (!fep->tso_hdrs) {
		dma_free_coherent(NULL, bd_size, cbd_base, fep->bd_dma);
		return -ENOMEM;
	}

	memset(cbd_base, 0, PAGE_SIZE);

	fep->netdev = ndev;

	/* Get the Ethernet address */
	fec_get_mac(ndev);
	/* make sure MAC we just acquired is programmed into the hw */
	fec_set_mac_address(ndev, NULL);

	/* Set receive and transmit descriptor base. */
	fep->rx_bd_base = cbd_base;
	if (fep->bufdesc_ex)
		fep->tx_bd_base = (struct bufdesc *)
			(((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size);
	else
		fep->tx_bd_base = cbd_base + fep->rx_ring_size;

	/* The FEC Ethernet specific entries in the device structure */
	ndev->watchdog_timeo = TX_TIMEOUT;
	ndev->netdev_ops = &fec_netdev_ops;
	ndev->ethtool_ops = &fec_enet_ethtool_ops;

	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);

	if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN)
		/* enable hw VLAN support */
		ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;

	if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
		ndev->gso_max_segs = FEC_MAX_TSO_SEGS;

		/* enable hw accelerator */
		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
				| NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
	}

	ndev->hw_features = ndev->features;

	fec_restart(ndev);

	return 0;
}
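
/* Resulting layout of the single coherent allocation made above: the rx and
 * tx rings sit back to back, so the tx base is just the rx base advanced by
 * rx_ring_size descriptors of the active descriptor size:
 *
 *	bd_dma/cbd_base -> [ rx bd 0 .. rx bd N-1 ][ tx bd 0 .. tx bd M-1 ]
 *	                     ^ fep->rx_bd_base       ^ fep->tx_bd_base
 */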
#ifdef CONFIG_OF
static void fec_reset_phy(struct platform_device *pdev)
{
	int err, phy_reset;
	int msec = 1;
	struct device_node *np = pdev->dev.of_node;

	if (!np)
		return;

	of_property_read_u32(np, "phy-reset-duration", &msec);
	/* A sane reset duration should not be longer than 1s */
	if (msec > 1000)
		msec = 1;

	phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
	if (!gpio_is_valid(phy_reset))
		return;

	err = devm_gpio_request_one(&pdev->dev, phy_reset,
				    GPIOF_OUT_INIT_LOW, "phy-reset");
	if (err) {
		dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
		return;
	}

	/* Hold the reset line low for the requested time, then release it */
	msleep(msec);
	gpio_set_value(phy_reset, 1);
}
#else /* CONFIG_OF */
static void fec_reset_phy(struct platform_device *pdev)
{
	/*
	 * In case of platform probe, the reset has been done
	 * by machine code.
	 */
}
#endif /* CONFIG_OF */
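
/* A hypothetical device tree fragment exercising fec_reset_phy() above; the
 * GPIO specifier and the duration (milliseconds the line is held low) are
 * board-specific values, shown here only for illustration:
 *
 *	&fec {
 *		phy-reset-gpios = <&gpio1 2 0>;
 *		phy-reset-duration = <10>;
 *	};
 */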
static int
fec_probe(struct platform_device *pdev)
{
	struct fec_enet_private *fep;
	struct fec_platform_data *pdata;
	struct net_device *ndev;
	int i, irq, ret = 0;
	struct resource *r;
	const struct of_device_id *of_id;
	static int dev_id;

	of_id = of_match_device(fec_dt_ids, &pdev->dev);
	if (of_id)
		pdev->id_entry = of_id->data;

	/* Init network device */
	ndev = alloc_etherdev(sizeof(struct fec_enet_private));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* setup board info structure */
	fep = netdev_priv(ndev);

#if !defined(CONFIG_M5272)
	/* default enable pause frame auto negotiation */
	if (pdev->id_entry &&
	    (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
#endif

	/* Select default pin state */
	pinctrl_pm_select_default_state(&pdev->dev);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	fep->hwp = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(fep->hwp)) {
		ret = PTR_ERR(fep->hwp);
		goto failed_ioremap;
	}

	fep->pdev = pdev;
	fep->dev_id = dev_id++;

	fep->bufdesc_ex = 0;

	platform_set_drvdata(pdev, ndev);

	ret = of_get_phy_mode(pdev->dev.of_node);
	if (ret < 0) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata)
			fep->phy_interface = pdata->phy;
		else
			fep->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		fep->phy_interface = ret;
	}

	fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(fep->clk_ipg)) {
		ret = PTR_ERR(fep->clk_ipg);
		goto failed_clk;
	}

	fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(fep->clk_ahb)) {
		ret = PTR_ERR(fep->clk_ahb);
		goto failed_clk;
	}

	/* enet_out is optional, depends on board */
	fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
	if (IS_ERR(fep->clk_enet_out))
		fep->clk_enet_out = NULL;

	fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
	fep->bufdesc_ex =
		pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
	if (IS_ERR(fep->clk_ptp)) {
		fep->clk_ptp = NULL;
		fep->bufdesc_ex = 0;
	}

	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
		goto failed_clk;

	fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
	if (!IS_ERR(fep->reg_phy)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable phy regulator: %d\n", ret);
			goto failed_regulator;
		}
	} else {
		fep->reg_phy = NULL;
	}

	fec_reset_phy(pdev);

	if (fep->bufdesc_ex)
		fec_ptp_init(pdev);

	ret = fec_enet_init(ndev);
	if (ret)
		goto failed_init;

	for (i = 0; i < FEC_IRQ_NUM; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			if (i)
				break;
			ret = irq;
			goto failed_irq;
		}
		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
				       0, pdev->name, ndev);
		if (ret)
			goto failed_irq;
	}

	ret = fec_enet_mii_init(pdev);
	if (ret)
		goto failed_mii_init;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(ndev);
	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&pdev->dev);

	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;

	if (fep->bufdesc_ex && fep->ptp_clock)
		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);

	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
	return 0;

failed_register:
	fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
failed_init:
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
failed_regulator:
	fec_enet_clk_enable(ndev, false);
failed_clk:
failed_ioremap:
	free_netdev(ndev);

	return ret;
}
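
/* Most resources acquired in fec_probe() (the MMIO mapping, clock and
 * regulator handles, the PHY reset GPIO, the IRQs) are devm-managed, so the
 * error ladder only unwinds the stateful steps: MII bus registration, the
 * regulator enable, the clock enables, and the netdev allocation itself.
 */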
static int
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	cancel_work_sync(&fep->tx_timeout_work);
	unregister_netdev(ndev);
	fec_enet_mii_remove(fep);
	del_timer_sync(&fep->time_keep);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	if (fep->ptp_clock)
		ptp_clock_unregister(fep->ptp_clock);
	fec_enet_clk_enable(ndev, false);
	free_netdev(ndev);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int
fec_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (netif_running(ndev)) {
		phy_stop(fep->phy_dev);
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		netif_device_detach(ndev);
		netif_tx_unlock_bh(ndev);
		fec_stop(ndev);
	}

	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);

	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);

	return 0;
}

static int
fec_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	if (fep->reg_phy) {
		ret = regulator_enable(fep->reg_phy);
		if (ret)
			return ret;
	}

	pinctrl_pm_select_default_state(&fep->pdev->dev);
	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
		goto failed_clk;

	if (netif_running(ndev)) {
		fec_restart(ndev);
		netif_tx_lock_bh(ndev);
		netif_device_attach(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
		phy_start(fep->phy_dev);
	}

	return 0;

failed_clk:
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */
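
/* SET_SYSTEM_SLEEP_PM_OPS inside SIMPLE_DEV_PM_OPS only references the
 * handlers when CONFIG_PM_SLEEP is set, which is why fec_suspend/fec_resume
 * can live entirely inside the matching #ifdef above.
 */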
static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume);

static struct platform_driver fec_driver = {
	.driver	= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
		.pm	= &fec_pm_ops,
		.of_match_table = fec_dt_ids,
	},
	.id_table = fec_devtype,
	.probe	= fec_probe,
	.remove	= fec_drv_remove,
};

module_platform_driver(fec_driver);

MODULE_ALIAS("platform:"DRIVER_NAME);
MODULE_LICENSE("GPL");