/* drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
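/* Illustrative only (not part of the driver): with the rxbd8 layout used
 * below, the receive walk described above amounts roughly to
 *
 *	struct rxbd8 *bdp = rx_queue->cur_rx;
 *
 *	while (!(bdp->status & RXBD_EMPTY)) {
 *		process_one_frame(bdp);		// hypothetical helper
 *		bdp = (bdp->status & RXBD_WRAP) ?
 *			rx_queue->rx_bd_base : bdp + 1;
 *	}
 *
 * The real loop, with NAPI budgeting and skb replenishing, lives in
 * gfar_clean_rx_ring().
 */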
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/net_tstamp.h>

#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "fsl_pq_mdio.h"

#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS
const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int amount_pull);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
		const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		dma_addr_t buf_addr)
{
	u32 lstatus;

	bdp->bufPtr = buf_addr;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}
static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					netdev_err(ndev, "Can't allocate RX buffers\n");
					goto err_rxalloc_fail;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}
	}

	return 0;

err_rxalloc_fail:
	free_skb_resources(priv);
	return -ENOMEM;
}
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			&addr, GFP_KERNEL);
	if (!vaddr) {
		netif_err(priv, ifup, ndev,
			  "Could not allocate buffer descriptors!\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
				tx_queue->tx_ring_size, GFP_KERNEL);
		if (!tx_queue->tx_skbuff) {
			netif_err(priv, ifup, ndev,
				  "Could not allocate tx_skbuff\n");
			goto cleanup;
		}

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
				rx_queue->rx_ring_size, GFP_KERNEL);

		if (!rx_queue->rx_skbuff) {
			netif_err(priv, ifup, ndev,
				  "Could not allocate rx_skbuff\n");
			goto cleanup;
		}

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}
static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}

	if (ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(8);
		priv->padding = 8;
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (ndev->features & NETIF_F_HW_VLAN_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}
static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};
void lock_rx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}
static bool gfar_is_vlan_on(struct gfar_private *priv)
{
	return (priv->ndev->features & NETIF_F_HW_VLAN_RX) ||
	       (priv->ndev->features & NETIF_F_HW_VLAN_TX);
}

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return gfar_is_vlan_on(priv) ||
	       (priv->ndev->features & NETIF_F_RXCSUM) ||
	       (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}
static void free_tx_pointers(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}
static int gfar_parse_group(struct device_node *np,
		struct gfar_private *priv, const char *model)
{
	u32 *queue_mask;

	priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
	if (!priv->gfargrp[priv->num_grps].regs)
		return -ENOMEM;

	priv->gfargrp[priv->num_grps].interruptTransmit =
			irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC, we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->gfargrp[priv->num_grps].interruptReceive =
			irq_of_parse_and_map(np, 1);
		priv->gfargrp[priv->num_grps].interruptError =
			irq_of_parse_and_map(np, 2);
		if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
		    priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ ||
		    priv->gfargrp[priv->num_grps].interruptError == NO_IRQ)
			return -EINVAL;
	}

	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
	priv->gfargrp[priv->num_grps].priv = priv;
	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np,
				"fsl,rx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].rx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np,
				"fsl,tx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].tx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
	}
	priv->num_grps++;

	return 0;
}
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the number of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->node = ofdev->dev.of_node;
	priv->ndev = dev;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;
	priv->num_grps = 0x0;

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i] = NULL;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i] = NULL;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i]) {
			err = -ENOMEM;
			goto tx_alloc_failed;
		}
		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = dev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i]) {
			err = -ENOMEM;
			goto rx_alloc_failed;
		}
		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = dev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
			FSL_GIANFAR_DEV_HAS_PADDING |
			FSL_GIANFAR_DEV_HAS_CSUM |
			FSL_GIANFAR_DEV_HAS_VLAN |
			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
			FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id. The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
err_grp_init:
	unmap_group_regs(priv);
	free_netdev(dev);
	return err;
}
static int gfar_hwtstamp_ioctl(struct net_device *netdev,
		struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 0;
			startup_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 1;
			startup_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_ioctl(dev, rq, cmd);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}
static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}
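/* Worked example (illustrative): with max_qs = 8, a bit_map of 0x80
 * (queue 0 in the MSB, as the hardware counts) comes back as 0x01, i.e.
 * queue 0 moves to bit 0 where for_each_set_bit() expects it. */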
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
		u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}
static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}
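/* Sketch of the resulting table layout (not a register dump; boundaries
 * are illustrative):
 *
 *	index 0 .. cur_filer_idx - 1	RQFCR_CMP_NOMATCH (masked, spare)
 *	index cur_filer_idx .. MAX	cluster entries for IPv4/IPv6 with
 *					and without TCP/UDP, ending in the
 *					catch-all RQFCR_CMP_MATCH rule
 */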
static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_A002;

	/* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
	    (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
		priv->errata |= GFAR_ERRATA_12;

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}
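/* Worked example (values assumed for illustration): an SVR of 0x80b00020
 * gives mod = 0x80b0 and rev = 0x0020; together with a PVR of 0x80850010
 * this matches the MPC8313 checks above and sets errata 74, 76 and A002. */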
/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct platform_device *ofdev)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gfar __iomem *regs = NULL;
	int err = 0, i, grp_idx = 0;
	u32 len_devname;
	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
	u32 isrg = 0;
	u32 __iomem *baddr;

	err = gfar_of_init(ofdev, &dev);
	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->node = ofdev->dev.of_node;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	dev_set_drvdata(&ofdev->dev, priv);
	regs = priv->gfargrp[0].regs;

	gfar_detect_errata(priv);

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running). */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(2);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;
	if (gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
	gfar_write(&regs->maccfg2, tempval);

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) regs;

	SET_NETDEV_DEV(dev, &ofdev->dev);

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;
	/* Register for napi ... We are registering NAPI for each grp */
	for (i = 0; i < priv->num_grps; i++)
		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
			       GFAR_DEV_WEIGHT);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
				   NETIF_F_RXCSUM;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		dev->features |= NETIF_F_HW_VLAN_RX;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;
	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM ||
	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->hard_header_len += GMAC_FCB_LEN;

	/* Program the isrg regs only if number of grps > 1 */
	if (priv->num_grps > 1) {
		baddr = &regs->isrg0;
		for (i = 0; i < priv->num_grps; i++) {
			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
			gfar_write(baddr, isrg);
			baddr++;
			isrg = 0x0;
		}
	}

	/* Need to reverse the bit maps as bit_map's MSB is q0
	 * but, for_each_set_bit parses from right to left, which
	 * basically reverses the queue numbers */
	for (i = 0; i < priv->num_grps; i++) {
		priv->gfargrp[i].tx_bit_map = reverse_bitmap(
				priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
		priv->gfargrp[i].rx_bit_map = reverse_bitmap(
				priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
	}
	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups */
	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
				 priv->num_rx_queues) {
			priv->gfargrp[grp_idx].num_rx_queues++;
			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		}
		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
				 priv->num_tx_queues) {
			priv->gfargrp[grp_idx].num_tx_queues++;
			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
			tqueue = tqueue | (TQUEUE_EN0 >> i);
		}
		priv->gfargrp[grp_idx].rstat = rstat;
		priv->gfargrp[grp_idx].tstat = tstat;
		rstat = tstat = 0;
	}

	gfar_write(&regs->rqueue, rqueue);
	gfar_write(&regs->tqueue, tqueue);

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* always enable rx filer */
	priv->rx_filer_enable = 1;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
	/* use priority h/w tx queue scheduling for single queue devices */
	if (priv->num_tx_queues == 1)
		priv->prio_sched_en = 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
		priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	len_devname = strlen(dev->name);
	for (i = 0; i < priv->num_grps; i++) {
		strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
			len_devname);
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_tx[
				strlen(priv->gfargrp[i].int_name_tx)] = i + 48;
			strncpy(&priv->gfargrp[i].int_name_tx[strlen(
				priv->gfargrp[i].int_name_tx)],
				"_tx", sizeof("_tx") + 1);

			strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
				len_devname);
			strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_rx[
				strlen(priv->gfargrp[i].int_name_rx)] = i + 48;
			strncpy(&priv->gfargrp[i].int_name_rx[strlen(
				priv->gfargrp[i].int_name_rx)],
				"_rx", sizeof("_rx") + 1);

			strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
				len_devname);
			strncpy(&priv->gfargrp[i].int_name_er[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_er[strlen(
				priv->gfargrp[i].int_name_er)] = i + 48;
			strncpy(&priv->gfargrp[i].int_name_er[strlen(
				priv->gfargrp[i].int_name_er)],
				"_er", sizeof("_er") + 1);
		} else
			priv->gfargrp[i].int_name_tx[len_devname] = '\0';
	}
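	/* The names built above take the form "<ifname>_g<N>_<irq>", e.g.
	 * "eth0_g0_tx"; i + 48 is simply the ASCII digit for group i. */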
	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks. */
	netdev_info(dev, "Running with NAPI enabled\n");
	for (i = 0; i < priv->num_rx_queues; i++)
		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);

	return 0;
register_fail:
	unmap_group_regs(priv);
	free_tx_pointers(priv);
	free_rx_pointers(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_netdev(dev);
	return err;
}

static int gfar_remove(struct platform_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	free_netdev(priv->ndev);

	return 0;
}
#ifdef CONFIG_PM

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {

		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt_nodisable(ndev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		disable_napi(priv);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}
static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	tempval = gfar_read(&regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	gfar_start(ndev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	netif_device_attach(ndev);

	enable_napi(priv);

	return 0;
}
static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev))
		return 0;

	gfar_init_bds(ndev);
	init_registers(ndev);
	gfar_set_mac_address(ndev);
	gfar_init_mac(ndev);
	gfar_start(ndev);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif
/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
			return PHY_INTERFACE_MODE_RMII;
		else {
			phy_interface_t interface = priv->interface;

			/*
			 * This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}
/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				      interface);
	if (!priv->phydev)
		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
							 interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;

	return 0;
}
/*
 * Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip. We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register. We assume
 * that the TBIPA register is valid. Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
			 "device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/*
	 * If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
	 * everything for us? Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off (for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
		  ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
		  BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}
static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Init hash registers to zero */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
}
static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

	/*
	 * Normally TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
	if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
		return 0;

	/*
	 * Read the eTSEC register at offset 0xD1C. If bits 7-14 are
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	res &= 0x7f807f80;
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}
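/* e.g. a masked res of 0x12001200: the two 16-bit halves match, so the
 * check above deems the Rx side idle (value purely illustrative). */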
/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	u32 tempval;
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Mask all interrupts */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);

		/* Clear all interrupts */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		int ret;

		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&regs->dmactrl, tempval);

		do {
			ret = spin_event_timeout(((gfar_read(&regs->ievent) &
				(IEVENT_GRSC | IEVENT_GTSC)) ==
				(IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
			if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
				ret = __gfar_is_rx_idle(priv);
		} while (!ret);
	}
}

/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	gfar_halt_nodisable(dev);

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}
static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(grp->interruptError, grp);
	free_irq(grp->interruptTransmit, grp);
	free_irq(grp->interruptReceive, grp);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	int i;

	phy_stop(priv->phydev);

	/* Lock it down */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	gfar_halt(dev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(priv->gfargrp[i].interruptTransmit,
				 &priv->gfargrp[i]);
	}

	free_skb_resources(priv);
}

static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
				 txbdp->length, DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
		     j++) {
			txbdp++;
			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
				       txbdp->length, DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
}

static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	struct rxbd8 *rxbdp;
	struct gfar_private *priv = netdev_priv(rx_queue->dev);
	int i;

	rxbdp = rx_queue->rx_bd_base;

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		if (rx_queue->rx_skbuff[i]) {
			dma_unmap_single(&priv->ofdev->dev,
					 rxbdp->bufPtr, priv->rx_buffer_size,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
			rx_queue->rx_skbuff[i] = NULL;
		}
		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;
	}
	kfree(rx_queue->rx_skbuff);
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_skbuff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(&priv->ofdev->dev,
			  sizeof(struct txbd8) * priv->total_tx_ring_size +
			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
			  priv->tx_queue[0]->tx_bd_base,
			  priv->tx_queue[0]->tx_bd_dma_base);
	skb_queue_purge(&priv->rx_recycle);
}
void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}

	dev->trans_start = jiffies; /* prevent tx timeout */
}
void gfar_configure_coalescing(struct gfar_private *priv,
		unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i = 0;

	/* Backward compatible case ---- even if we enable
	 * multiple queues, there's only single reg to program
	 */
	gfar_write(&regs->txic, 0);
	if (likely(priv->tx_queue[0]->txcoalescing))
		gfar_write(&regs->txic, priv->tx_queue[0]->txic);

	gfar_write(&regs->rxic, 0);
	if (unlikely(priv->rx_queue[0]->rxcoalescing))
		gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);

	if (priv->mode == MQ_MG_MODE) {
		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			if (likely(priv->tx_queue[i]->txcoalescing)) {
				gfar_write(baddr + i, 0);
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
			}
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			if (likely(priv->rx_queue[i]->rxcoalescing)) {
				gfar_write(baddr + i, 0);
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
			}
		}
	}
}
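/* In the MQ path above, the per-queue coalescing registers are laid out
 * contiguously, so (baddr + i) selects txic<i>/rxic<i>; each register is
 * cleared first, then loaded with the queue's frame-count/timer pair. */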
static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them. Otherwise, only register for the one */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if ((err = request_irq(grp->interruptError, gfar_error, 0,
				       grp->int_name_er, grp)) < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  grp->interruptError);

			goto err_irq_fail;
		}

		if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
				       0, grp->int_name_tx, grp)) < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  grp->interruptTransmit);
			goto tx_irq_fail;
		}

		if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
				       grp->int_name_rx, grp)) < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  grp->interruptReceive);
			goto rx_irq_fail;
		}
	} else {
		if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
				       grp->int_name_tx, grp)) < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  grp->interruptTransmit);
			goto err_irq_fail;
		}
	}

	return 0;

rx_irq_fail:
	free_irq(grp->interruptTransmit, grp);
tx_irq_fail:
	free_irq(grp->interruptError, grp);
err_irq_fail:
	return err;
}
/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = NULL;
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_mac(ndev);

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			goto irq_fail;
		}
	}

	/* Start the controller */
	gfar_start(ndev);

	phy_start(priv->phydev);

	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	return 0;

irq_fail:
	free_skb_resources(priv);
	return err;
}
/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	enable_napi(priv);

	skb_queue_head_init(&priv->rx_recycle);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);
	if (err) {
		disable_napi(priv);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		disable_napi(priv);
		return err;
	}

	netif_tx_start_all_queues(dev);

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
	u8 flags = 0;

	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload. We set it to checksum, using a pseudo-header
	 * we provide
	 */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is */
	/* And provide the already calculated phcs */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}
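/* Worked example (illustrative): for an untagged IPv4/TCP frame, the FCB
 * occupies the first GMAC_FCB_LEN (8) bytes, so l3os = 14 (the Ethernet
 * header) and l4os = 20 (a minimal IP header). */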
void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}

static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
		struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
		int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}
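/* Minimal usage sketch (illustrative): advancing one BD at a time while
 * wrapping past the end of the ring:
 *
 *	bdp = next_txbd(bdp, base, ring_size);
 *
 * skip_txbd() generalizes this for multi-BD frames (fragments plus an
 * optional FCB/timestamp descriptor). */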
/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct netdev_queue *txq;
	struct gfar __iomem *regs = NULL;
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
	u32 lstatus;
	int i, rq = 0, do_tstamp = 0;
	u32 bufaddr;
	unsigned long flags;
	unsigned int nr_frags, nr_txbds, length;

	/*
	 * TOE=1 frames larger than 2500 bytes may see excess delays
	 * before start of transmission.
	 */
	if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
		     skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb->len > 2500)) {
		int ret;

		ret = skb_checksum_help(skb);
		if (ret)
			return ret;
	}

	rq = skb->queue_mapping;
	tx_queue = priv->tx_queue[rq];
	txq = netdev_get_tx_queue(dev, rq);
	base = tx_queue->tx_bd_base;
	regs = tx_queue->grp->regs;

	/* check if time stamp should be generated */
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
		     priv->hwts_tx_en))
		do_tstamp = 1;

	/* make space for additional header when fcb is needed */
	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
	     vlan_tx_tag_present(skb) ||
	     unlikely(do_tstamp)) &&
	    (skb_headroom(skb) < GMAC_FCB_LEN)) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
		if (!skb_new) {
			dev->stats.tx_errors++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		kfree_skb(skb);
		skb = skb_new;
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* calculate the required number of TxBDs for this skb */
	if (unlikely(do_tstamp))
		nr_txbds = nr_frags + 2;
	else
		nr_txbds = nr_frags + 1;

	/* check if there is space to queue this packet */
	if (nr_txbds > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	tx_queue->stats.tx_bytes += skb->len;
	tx_queue->stats.tx_packets++;

	txbdp = txbdp_start = tx_queue->cur_tx;
	lstatus = txbdp->lstatus;

	/* Time stamp insertion requires one additional TxBD */
	if (unlikely(do_tstamp))
		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
						 tx_queue->tx_ring_size);
	if (nr_frags == 0) {
		if (unlikely(do_tstamp))
			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
							  TXBD_INTERRUPT);
		else
			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		/* Place the fragment addresses and lengths into the TxBDs */
		for (i = 0; i < nr_frags; i++) {
			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			length = skb_shinfo(skb)->frags[i].size;

			lstatus = txbdp->lstatus | length |
				  BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = skb_frag_dma_map(&priv->ofdev->dev,
						   &skb_shinfo(skb)->frags[i],
						   0, length, DMA_TO_DEVICE);

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = bufaddr;
			txbdp->lstatus = lstatus;
		}

		lstatus = txbdp_start->lstatus;
	}

	/* Set up checksumming */
	if (CHECKSUM_PARTIAL == skb->ip_summed) {
		fcb = gfar_add_fcb(skb);
		/* as specified by errata */
		if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12)
			     && ((unsigned long)fcb % 0x20) > 0x18)) {
			__skb_pull(skb, GMAC_FCB_LEN);
			skb_checksum_help(skb);
		} else {
			lstatus |= BD_LFLAG(TXBD_TOE);
			gfar_tx_checksum(skb, fcb);
		}
	}

	if (vlan_tx_tag_present(skb)) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb);
			lstatus |= BD_LFLAG(TXBD_TOE);
		}

		gfar_tx_vlan(skb, fcb);
	}

	/* Setup tx hardware time stamping if requested */
	if (unlikely(do_tstamp)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		if (fcb == NULL)
			fcb = gfar_add_fcb(skb);
		fcb->ptp = 1;
		lstatus |= BD_LFLAG(TXBD_TOE);
	}

	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
					     skb_headlen(skb), DMA_TO_DEVICE);

	/*
	 * If time stamping is requested one additional TxBD must be set up. The
	 * first TxBD points to the FCB and must have a data length of
	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
	 * the full frame length.
	 */
	if (unlikely(do_tstamp)) {
		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
					 (skb_headlen(skb) - GMAC_FCB_LEN);
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
	} else {
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
	}
	/*
	 * We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree. Note that we didn't grab the lock
	 * when we were reading the num_txbdfree and checking for available
	 * space, that's because outside of this function it can only grow,
	 * and once we've got needed space, it cannot suddenly disappear.
	 *
	 * The lock also protects us from gfar_error(), which can modify
	 * regs->tstat and thus retrigger the transfers, which is why we
	 * also must grab the lock before setting ready bit for the first
	 * to be transmitted BD.
	 */
	spin_lock_irqsave(&tx_queue->txlock, flags);
	/*
	 * The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction. At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */
	eieio();

	txbdp_start->lstatus = lstatus;

	eieio(); /* force lstatus write before tx_skbuff */
	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary) */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
		TX_RING_MOD_MASK(tx_queue->tx_ring_size);
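	/*
	 * Illustrative: ring sizes are powers of two, so assuming a
	 * 256-entry ring TX_RING_MOD_MASK(256) == 0xff, and
	 * (255 + 1) & 0xff wraps the index back to 0 without a divide.
	 */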
	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_txbds);

	/* If the next BD still needs to be cleaned up, then the bds
	   are full. We need to tell the kernel to stop sending us stuff. */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}
	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	spin_unlock_irqrestore(&tx_queue->txlock, flags);

	return NETDEV_TX_OK;
}
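/*
 * A minimal sketch of the num_txbdfree protocol used above (illustrative
 * only; single producer gfar_start_xmit() vs. single consumer
 * gfar_clean_tx_ring()):
 *
 *	producer                        consumer
 *	--------                        --------
 *	read num_txbdfree (unlocked)
 *	fill nr_txbds descriptors       reclaim completed descriptors
 *	spin_lock(&txlock)              spin_lock(&txlock)
 *	num_txbdfree -= nr_txbds        num_txbdfree += nr_txbds
 *	spin_unlock(&txlock)            spin_unlock(&txlock)
 *
 * The unlocked read is safe because only the consumer can change the count
 * between the check and the locked update, and it can only increase it.
 */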
/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_tx_stop_all_queues(dev);

	return 0;
}
/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}
/* Check if rx parser should be activated */
void gfar_check_rx_parser_mode(struct gfar_private *priv)
{
	struct gfar __iomem *regs;
	u32 tempval;

	regs = priv->gfargrp[0].regs;

	tempval = gfar_read(&regs->rctrl);
	/* If parse is no longer required, then disable parser */
	if (tempval & RCTRL_REQ_PARSER)
		tempval |= RCTRL_PRSDEP_INIT;
	else
		tempval &= ~RCTRL_PRSDEP_INIT;
	gfar_write(&regs->rctrl, tempval);
}
/* Enables and disables VLAN insertion/extraction */
void gfar_vlan_mode(struct net_device *dev, u32 features)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	unsigned long flags;
	u32 tempval;

	regs = priv->gfargrp[0].regs;
	local_irq_save(flags);

	if (features & NETIF_F_HW_VLAN_TX) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval |= TCTRL_VLINS;
		gfar_write(&regs->tctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&regs->tctrl, tempval);
	}

	if (features & NETIF_F_HW_VLAN_RX) {
		/* Enable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		gfar_write(&regs->rctrl, tempval);

		gfar_check_rx_parser_mode(priv);
	}

	gfar_change_mtu(dev, dev->mtu);

	local_irq_restore(flags);
}
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (gfar_is_vlan_on(priv))
		frame_size += VLAN_HLEN;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		netif_err(priv, drv, dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	tempsize =
	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	    INCREMENTAL_BUFFER_SIZE;

	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&regs->mrblr, priv->rx_buffer_size);
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
			gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}
/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
			reset_task);
	struct net_device *dev = priv->ndev;

	if (dev->flags & IFF_UP) {
		netif_tx_stop_all_queues(dev);
		stop_gfar(dev);
		startup_gfar(dev);
		netif_tx_start_all_queues(dev);
	}

	netif_tx_schedule_all(dev);
}
static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}
static void gfar_align_skb(struct sk_buff *skb)
{
	/* We need the data buffer to be aligned properly. We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, RXBUF_ALIGNMENT -
			(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
}
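/*
 * Worked example (illustrative, assuming RXBUF_ALIGNMENT == 64): if
 * skb->data ends in 0x34, the low bits are 0x34, so 64 - 0x34 = 12 bytes
 * are reserved and the buffer then ends in 0x40, a 64-byte boundary. An
 * already-aligned buffer gets a full RXBUF_ALIGNMENT of headroom, which
 * wastes a little space but preserves alignment.
 */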
/* Reclaim completed transmit descriptors; called from gfar_poll() */
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *bdp, *next = NULL;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0, nr_txbds = 0;
	int i;
	int howmany = 0;
	u32 lstatus;
	size_t buflen;
	rx_queue = priv->rx_queue[tx_queue->qindex];
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		unsigned long flags;

		frags = skb_shinfo(skb)->nr_frags;

		/*
		 * When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = lbdp->lstatus;

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
				(lstatus & BD_LENGTH_MASK))
			break;
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = next->length + GMAC_FCB_LEN;
		} else
			buflen = bdp->length;

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				buflen, DMA_TO_DEVICE);

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			struct skb_shared_hwtstamps shhwtstamps;
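			/* The timestamp is read out of the TxPAL: 16 bytes
			 * past skb->data, rounded down to an 8-byte boundary
			 * so the u64 load below is naturally aligned. */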
			u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7);
			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
			skb_tstamp_tx(skb, &shhwtstamps);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next;
		} else
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);

		bdp = next_txbd(bdp, base, tx_ring_size);
		for (i = 0; i < frags; i++) {
			dma_unmap_page(&priv->ofdev->dev,
					bdp->bufPtr,
					bdp->length,
					DMA_TO_DEVICE);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}
		/*
		 * If there's room in the queue (limit it to rx_buffer_size)
		 * we add this skb back into the pool, if it's the right size
		 */
		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
				skb_recycle_check(skb, priv->rx_buffer_size +
					RXBUF_ALIGNMENT)) {
			gfar_align_skb(skb);
			skb_queue_head(&priv->rx_recycle, skb);
		} else
			dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock_irqsave(&tx_queue->txlock, flags);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
	}
	/* If we freed a buffer, we can restart transmission, if necessary */
	if (__netif_subqueue_stopped(dev, tx_queue->qindex) &&
	    tx_queue->num_txbdfree)
		netif_wake_subqueue(dev, tx_queue->qindex);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	return howmany;
}
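/*
 * Ring bookkeeping sketch (illustrative): gfar_start_xmit() advances
 * cur_tx/skb_curtx as frames are queued, while the loop above advances
 * dirty_tx/skb_dirtytx as completed frames are reclaimed. The consumer
 * stops either at the first still-READY descriptor or when it catches up
 * with the producer (tx_skbuff[skb_dirtytx] == NULL).
 */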
static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
{
	unsigned long flags;

	spin_lock_irqsave(&gfargrp->grplock, flags);
	if (napi_schedule_prep(&gfargrp->napi)) {
		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
		__napi_schedule(&gfargrp->napi);
	} else {
		/*
		 * Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
	}
	spin_unlock_irqrestore(&gfargrp->grplock, flags);
}
/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb)
{
	struct net_device *dev = rx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	dma_addr_t buf;

	buf = dma_map_single(&priv->ofdev->dev, skb->data,
			priv->rx_buffer_size, DMA_FROM_DEVICE);
	gfar_init_rxbdp(rx_queue, bdp, buf);
}
static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
	if (!skb)
		return NULL;

	gfar_align_skb(skb);

	return skb;
}
struct sk_buff *gfar_new_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	skb = skb_dequeue(&priv->rx_recycle);
	if (!skb)
		skb = gfar_alloc_skb(dev);

	return skb;
}
static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors
	 * matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}
irqreturn_t gfar_receive(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary. Otherwise, it is */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}
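/*
 * Example (illustrative): a frame whose IP header checksum and TCP/UDP
 * checksum were both found and verified has exactly RXFCB_CIP | RXFCB_CTU
 * set within RXFCB_CSUM_MASK; any error or missing-header flag makes the
 * masked value differ, and the stack is left to verify the sums itself.
 */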
/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			      int amount_pull)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	int ret;
	/* fcb is at the beginning if exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	/* Remove the padded bytes, if there are any */
	if (amount_pull) {
		skb_record_rx_queue(skb, fcb->rq);
		skb_pull(skb, amount_pull);
	}
	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *) skb->data;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);
	if (dev->features & NETIF_F_RXCSUM)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/*
	 * We need to check for NETIF_F_HW_VLAN_RX here.
	 * Even if vlan rx accel is disabled, on some chips
	 * RXFCB_VLN is pseudo randomly set.
	 */
	if (dev->features & NETIF_F_HW_VLAN_RX &&
	    fcb->flags & RXFCB_VLN)
		__vlan_hwaccel_put_tag(skb, fcb->vlctl);

	/* Send the packet up the stack */
	ret = netif_receive_skb(skb);

	if (NET_RX_DROP == ret)
		priv->extra_stats.kernel_dropped++;

	return 0;
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;

		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				priv->rx_buffer_size, DMA_FROM_DEVICE);

		if (unlikely(!(bdp->status & RXBD_ERR) &&
				bdp->length > priv->rx_buffer_size))
			bdp->status = RXBD_LARGE;
		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
				bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb)
				skb_queue_head(&priv->rx_recycle, skb);
		} else {
			/* Increment the number of packets */
			rx_queue->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				pkt_len = bdp->length - ETH_FCS_LEN;
				/* Remove the FCS from the packet length */
				skb_put(skb, pkt_len);
				rx_queue->stats.rx_bytes += pkt_len;
				skb_record_rx_queue(skb, rx_queue->qindex);
				gfar_process_frame(dev, skb, amount_pull);
			} else {
				netif_warn(priv, rx_err, dev, "Missing skb!\n");
				rx_queue->stats.rx_dropped++;
				priv->extra_stats.rx_skbmissing++;
			}
		}
		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(rx_queue, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx =
			(rx_queue->skb_currx + 1) &
			RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}
static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp = container_of(napi,
			struct gfar_priv_grp, napi);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
	int tx_cleaned = 0, i, left_over_budget = budget;
	unsigned long serviced_queues = 0;
	int num_queues = 0;

	num_queues = gfargrp->num_rx_queues;
	budget_per_queue = budget/num_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived */
	gfar_write(&regs->ievent, IEVENT_RTX_MASK);
	while (num_queues && left_over_budget) {

		budget_per_queue = left_over_budget/num_queues;
		left_over_budget = 0;

		for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
			if (test_bit(i, &serviced_queues))
				continue;
			rx_queue = priv->rx_queue[i];
			tx_queue = priv->tx_queue[rx_queue->qindex];

			tx_cleaned += gfar_clean_tx_ring(tx_queue);
			rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
							budget_per_queue);
			rx_cleaned += rx_cleaned_per_queue;
			if (rx_cleaned_per_queue < budget_per_queue) {
				left_over_budget = left_over_budget +
					(budget_per_queue -
					 rx_cleaned_per_queue);
				set_bit(i, &serviced_queues);
				num_queues--;
			}
		}
	}

	if (tx_cleaned)
		return budget;
	if (rx_cleaned < budget) {
		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		gfar_write(&regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer */
		/* Otherwise, clear it */
		gfar_configure_coalescing(priv,
				gfargrp->rx_bit_map, gfargrp->tx_bit_map);
	}

	return rx_cleaned;
}
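/*
 * Budget redistribution example (illustrative): with budget == 64 and two
 * RX queues in the group, each queue is first offered 32 packets. If
 * queue 0 only consumes 20, it is marked serviced, the spare 12 go into
 * left_over_budget, and the next pass of the while loop above offers all
 * 12 to the remaining queue.
 */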
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i = 0;
	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			disable_irq(priv->gfargrp[i].interruptReceive);
			disable_irq(priv->gfargrp[i].interruptError);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
						&priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptError);
			enable_irq(priv->gfargrp[i].interruptReceive);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
						&priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	}
}
#endif
/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}
/* Called every time the controller might need to be made
 * aware of new link state. The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	local_irq_save(flags);

	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}
		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				netif_warn(priv, link, dev,
					   "Ack! Speed (%d) is not 10/100/1000!\n",
					   phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}
		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	local_irq_restore(flags);
}
/* Update the hash table based on the current list of multicast
 * addresses we subscribe to. Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}
	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;
		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);
		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}
/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	static const u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, zero_arr);
}
/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table. The table is controlled through 8 32-bit registers:
 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255. This means that the 3 most significant bits of the
 * hash index select which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry. */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}
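/*
 * Worked example (illustrative, assuming hash_width == 8): for a CRC
 * result of 0xb6000000, whichreg is the top 3 bits (0xb6 >> 5 == 5) and
 * whichbit the next 5 bits (0xb6 & 0x1f == 22), so bit 22 in IBM
 * numbering -- i.e. 1 << (31 - 22) -- is set in hash_regs[5].
 */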
/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int idx;
	char tmpbuf[MAC_ADDR_LEN];
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz */
	/* little endian is silly */
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}
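/*
 * Worked example (illustrative): for addr 00:04:9f:01:02:03 the loop
 * builds tmpbuf = 03:02:01:9f:04:00, so the first register receives the
 * last four octets reversed and the following register (macstnaddr2) the
 * first two, matching the little-endian layout the hardware expects.
 */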
/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		netdev_dbg(dev,
			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
			   events, gfar_read(&regs->imask));
	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			unsigned long flags;

			netif_dbg(priv, tx_err, dev,
				  "TX FIFO underrun, packet dropped\n");
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			local_irq_save(flags);

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, gfargrp->tstat);

			local_irq_restore(flags);
		}
		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, grp_id);

		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		netif_dbg(priv, rx_err, dev, "bus error\n");
	}
	if (events & IEVENT_RXC)
		netif_dbg(priv, rx_status, dev, "control frame\n");

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
	}

	return IRQ_HANDLED;
}
static struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);
/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.owner = THIS_MODULE,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};
static int __init gfar_init(void)
{
	return platform_driver_register(&gfar_driver);
}

static void __exit gfar_exit(void)
{
	platform_driver_unregister(&gfar_driver);
}

module_init(gfar_init);
module_exit(gfar_exit);