2 * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
4 * 2005-2009 (c) Aeroflex Gaisler AB
6 * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
7 * available in the GRLIB VHDL IP core library.
9 * Full documentation of both cores can be found here:
10 * http://www.gaisler.com/products/grlib/grip.pdf
12 * The Gigabit version supports scatter/gather DMA, any alignment of
13 * buffers and checksum offloading.
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 of the License, or (at your
18 * option) any later version.
20 * Contributors: Kristoffer Glembo
25 #include <linux/module.h>
26 #include <linux/uaccess.h>
27 #include <linux/init.h>
28 #include <linux/netdevice.h>
29 #include <linux/etherdevice.h>
30 #include <linux/ethtool.h>
31 #include <linux/skbuff.h>
33 #include <linux/crc32.h>
34 #include <linux/mii.h>
35 #include <linux/of_device.h>
36 #include <linux/of_platform.h>
37 #include <linux/slab.h>
38 #include <asm/cacheflush.h>
39 #include <asm/byteorder.h>
42 #include <asm/idprom.h>
47 #define GRETH_DEF_MSG_ENABLE \
56 static int greth_debug = -1; /* -1 == use GRETH_DEF_MSG_ENABLE as value */
57 module_param(greth_debug, int, 0);
58 MODULE_PARM_DESC(greth_debug, "GRETH bitmapped debugging message enable value");
60 /* Accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
61 static int macaddr[6];
62 module_param_array(macaddr, int, NULL, 0);
63 MODULE_PARM_DESC(macaddr, "GRETH Ethernet MAC address");
65 static int greth_edcl = 1;
66 module_param(greth_edcl, int, 0);
67 MODULE_PARM_DESC(greth_edcl, "GRETH EDCL usage indicator. Set to 1 if EDCL is used.");
69 static int greth_open(struct net_device *dev);
70 static netdev_tx_t greth_start_xmit(struct sk_buff *skb,
71 struct net_device *dev);
72 static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb,
73 struct net_device *dev);
74 static int greth_rx(struct net_device *dev, int limit);
75 static int greth_rx_gbit(struct net_device *dev, int limit);
76 static void greth_clean_tx(struct net_device *dev);
77 static void greth_clean_tx_gbit(struct net_device *dev);
78 static irqreturn_t greth_interrupt(int irq, void *dev_id);
79 static int greth_close(struct net_device *dev);
80 static int greth_set_mac_add(struct net_device *dev, void *p);
81 static void greth_set_multicast_list(struct net_device *dev);
83 #define GRETH_REGLOAD(a) (be32_to_cpu(__raw_readl(&(a))))
84 #define GRETH_REGSAVE(a, v) (__raw_writel(cpu_to_be32(v), &(a)))
85 #define GRETH_REGORIN(a, v) (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) | (v))))
86 #define GRETH_REGANDIN(a, v) (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) & (v))))
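/*
 * GRETH registers are big-endian; wrapping __raw_readl/__raw_writel with an
 * explicit byte swap keeps these accessors correct regardless of host
 * endianness (the swaps are no-ops on the big-endian LEON/SPARC systems the
 * core normally sits in).
 */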
88 #define NEXT_TX(N) (((N) + 1) & GRETH_TXBD_NUM_MASK)
89 #define SKIP_TX(N, C) (((N) + C) & GRETH_TXBD_NUM_MASK)
90 #define NEXT_RX(N) (((N) + 1) & GRETH_RXBD_NUM_MASK)
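/*
 * Ring indices wrap by masking, which assumes GRETH_TXBD_NUM and
 * GRETH_RXBD_NUM are powers of two (the *_NUM_MASK constants being the
 * ring size minus one).
 */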
92 static void greth_print_rx_packet(void *addr, int len)
94 print_hex_dump(KERN_DEBUG, "RX: ", DUMP_PREFIX_OFFSET, 16, 1,
98 static void greth_print_tx_packet(struct sk_buff *skb)
103 if (skb_shinfo(skb)->nr_frags == 0)
106 length = skb_headlen(skb);
108 print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
109 skb->data, length, true);
111 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
113 print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
114 phys_to_virt(page_to_phys(skb_shinfo(skb)->frags[i].page)) +
115 skb_shinfo(skb)->frags[i].page_offset,
120 static inline void greth_enable_tx(struct greth_private *greth)
123 GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
126 static inline void greth_disable_tx(struct greth_private *greth)
128 GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
131 static inline void greth_enable_rx(struct greth_private *greth)
134 GRETH_REGORIN(greth->regs->control, GRETH_RXEN);
137 static inline void greth_disable_rx(struct greth_private *greth)
139 GRETH_REGANDIN(greth->regs->control, ~GRETH_RXEN);
142 static inline void greth_enable_irqs(struct greth_private *greth)
144 GRETH_REGORIN(greth->regs->control, GRETH_RXI | GRETH_TXI);
147 static inline void greth_disable_irqs(struct greth_private *greth)
149 GRETH_REGANDIN(greth->regs->control, ~(GRETH_RXI|GRETH_TXI));
152 static inline void greth_write_bd(u32 *bd, u32 val)
154 __raw_writel(cpu_to_be32(val), bd);
157 static inline u32 greth_read_bd(u32 *bd)
159 return be32_to_cpu(__raw_readl(bd));
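/*
 * greth_clean_rings - release every buffer still attached to the rings.
 * The gigabit MAC unmaps and frees the skb (and any page fragments) bound
 * to each descriptor; the 10/100 MAC simply frees its fixed bounce buffers.
 */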
162 static void greth_clean_rings(struct greth_private *greth)
165 struct greth_bd *rx_bdp = greth->rx_bd_base;
166 struct greth_bd *tx_bdp = greth->tx_bd_base;
168 if (greth->gbit_mac) {
170 /* Free and unmap RX buffers */
171 for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
172 if (greth->rx_skbuff[i] != NULL) {
173 dev_kfree_skb(greth->rx_skbuff[i]);
174 dma_unmap_single(greth->dev,
175 greth_read_bd(&rx_bdp->addr),
176 MAX_FRAME_SIZE+NET_IP_ALIGN,
182 while (greth->tx_free < GRETH_TXBD_NUM) {
184 struct sk_buff *skb = greth->tx_skbuff[greth->tx_last];
185 int nr_frags = skb_shinfo(skb)->nr_frags;
186 tx_bdp = greth->tx_bd_base + greth->tx_last;
187 greth->tx_last = NEXT_TX(greth->tx_last);
189 dma_unmap_single(greth->dev,
190 greth_read_bd(&tx_bdp->addr),
194 for (i = 0; i < nr_frags; i++) {
195 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
196 tx_bdp = greth->tx_bd_base + greth->tx_last;
198 dma_unmap_page(greth->dev,
199 greth_read_bd(&tx_bdp->addr),
203 greth->tx_last = NEXT_TX(greth->tx_last);
205 greth->tx_free += nr_frags+1;
210 } else { /* 10/100 Mbps MAC */
212 for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
213 kfree(greth->rx_bufs[i]);
214 dma_unmap_single(greth->dev,
215 greth_read_bd(&rx_bdp->addr),
219 for (i = 0; i < GRETH_TXBD_NUM; i++, tx_bdp++) {
220 kfree(greth->tx_bufs[i]);
221 dma_unmap_single(greth->dev,
222 greth_read_bd(&tx_bdp->addr),
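/*
 * greth_init_rings - allocate and DMA-map the RX/TX buffers and point the
 * MAC at the descriptor rings. The gigabit MAC gets a pre-mapped skb per
 * RX descriptor, while the 10/100 MAC uses fixed kmalloc'ed buffers that
 * frame data is copied through, since that core does not handle arbitrary
 * buffer alignment.
 */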
229 static int greth_init_rings(struct greth_private *greth)
232 struct greth_bd *rx_bd, *tx_bd;
236 rx_bd = greth->rx_bd_base;
237 tx_bd = greth->tx_bd_base;
239 /* Initialize descriptor rings and buffers */
240 if (greth->gbit_mac) {
242 for (i = 0; i < GRETH_RXBD_NUM; i++) {
243 skb = netdev_alloc_skb(greth->netdev, MAX_FRAME_SIZE+NET_IP_ALIGN);
245 if (netif_msg_ifup(greth))
246 dev_err(greth->dev, "Error allocating DMA ring.\n");
249 skb_reserve(skb, NET_IP_ALIGN);
250 dma_addr = dma_map_single(greth->dev,
252 MAX_FRAME_SIZE+NET_IP_ALIGN,
255 if (dma_mapping_error(greth->dev, dma_addr)) {
256 if (netif_msg_ifup(greth))
257 dev_err(greth->dev, "Could not create initial DMA mapping\n");
260 greth->rx_skbuff[i] = skb;
261 greth_write_bd(&rx_bd[i].addr, dma_addr);
262 greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
/* 10/100 MAC uses a fixed set of buffers and copies to/from SKBs */
268 for (i = 0; i < GRETH_RXBD_NUM; i++) {
270 greth->rx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);
272 if (greth->rx_bufs[i] == NULL) {
273 if (netif_msg_ifup(greth))
274 dev_err(greth->dev, "Error allocating DMA ring.\n");
278 dma_addr = dma_map_single(greth->dev,
283 if (dma_mapping_error(greth->dev, dma_addr)) {
284 if (netif_msg_ifup(greth))
285 dev_err(greth->dev, "Could not create initial DMA mapping\n");
288 greth_write_bd(&rx_bd[i].addr, dma_addr);
289 greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
291 for (i = 0; i < GRETH_TXBD_NUM; i++) {
293 greth->tx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);
295 if (greth->tx_bufs[i] == NULL) {
296 if (netif_msg_ifup(greth))
297 dev_err(greth->dev, "Error allocating DMA ring.\n");
301 dma_addr = dma_map_single(greth->dev,
306 if (dma_mapping_error(greth->dev, dma_addr)) {
307 if (netif_msg_ifup(greth))
308 dev_err(greth->dev, "Could not create initial DMA mapping\n");
311 greth_write_bd(&tx_bd[i].addr, dma_addr);
312 greth_write_bd(&tx_bd[i].stat, 0);
315 greth_write_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat,
316 greth_read_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat) | GRETH_BD_WR);
318 /* Initialize pointers. */
322 greth->tx_free = GRETH_TXBD_NUM;
324 /* Initialize descriptor base address */
325 GRETH_REGSAVE(greth->regs->tx_desc_p, greth->tx_bd_base_phys);
326 GRETH_REGSAVE(greth->regs->rx_desc_p, greth->rx_bd_base_phys);
331 greth_clean_rings(greth);
335 static int greth_open(struct net_device *dev)
337 struct greth_private *greth = netdev_priv(dev);
340 err = greth_init_rings(greth);
342 if (netif_msg_ifup(greth))
343 dev_err(&dev->dev, "Could not allocate memory for DMA rings\n");
347 err = request_irq(greth->irq, greth_interrupt, 0, "eth", (void *) dev);
349 if (netif_msg_ifup(greth))
dev_err(&dev->dev, "Could not allocate interrupt %d\n", greth->irq);
351 greth_clean_rings(greth);
355 if (netif_msg_ifup(greth))
356 dev_dbg(&dev->dev, " starting queue\n");
357 netif_start_queue(dev);
359 GRETH_REGSAVE(greth->regs->status, 0xFF);
361 napi_enable(&greth->napi);
363 greth_enable_irqs(greth);
364 greth_enable_tx(greth);
365 greth_enable_rx(greth);
370 static int greth_close(struct net_device *dev)
372 struct greth_private *greth = netdev_priv(dev);
374 napi_disable(&greth->napi);
376 greth_disable_irqs(greth);
377 greth_disable_tx(greth);
378 greth_disable_rx(greth);
380 netif_stop_queue(dev);
382 free_irq(greth->irq, (void *) dev);
384 greth_clean_rings(greth);
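/*
 * 10/100 transmit path: the frame is copied into the fixed DMA buffer
 * already bound to the next free descriptor, so no per-skb mapping is
 * needed; the descriptor is then handed to the hardware by setting the
 * enable bit.
 */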
390 greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
392 struct greth_private *greth = netdev_priv(dev);
393 struct greth_bd *bdp;
394 int err = NETDEV_TX_OK;
395 u32 status, dma_addr;
397 bdp = greth->tx_bd_base + greth->tx_next;
399 if (unlikely(greth->tx_free <= 0)) {
400 netif_stop_queue(dev);
401 return NETDEV_TX_BUSY;
404 if (netif_msg_pktdata(greth))
405 greth_print_tx_packet(skb);
408 if (unlikely(skb->len > MAX_FRAME_SIZE)) {
409 dev->stats.tx_errors++;
413 dma_addr = greth_read_bd(&bdp->addr);
415 memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);
417 dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
419 status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN);
421 /* Wrap around descriptor ring */
422 if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
423 status |= GRETH_BD_WR;
426 greth->tx_next = NEXT_TX(greth->tx_next);
429 /* No more descriptors */
430 if (unlikely(greth->tx_free == 0)) {
432 /* Free transmitted descriptors */
435 /* If nothing was cleaned, stop queue & wait for irq */
436 if (unlikely(greth->tx_free == 0)) {
437 status |= GRETH_BD_IE;
438 netif_stop_queue(dev);
442 /* Write descriptor control word and enable transmission */
443 greth_write_bd(&bdp->stat, status);
444 greth_enable_tx(greth);
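/*
 * Gigabit transmit path: the linear part and each page fragment get their
 * own descriptor and DMA mapping. Every descriptor but the last carries
 * GRETH_TXBD_MORE, and the first descriptor is enabled only after the
 * whole chain has been written, so the MAC never sees a partial chain.
 */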
453 greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
455 struct greth_private *greth = netdev_priv(dev);
456 struct greth_bd *bdp;
457 u32 status = 0, dma_addr;
458 int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
460 nr_frags = skb_shinfo(skb)->nr_frags;
462 if (greth->tx_free < nr_frags + 1) {
463 netif_stop_queue(dev);
464 err = NETDEV_TX_BUSY;
468 if (netif_msg_pktdata(greth))
469 greth_print_tx_packet(skb);
471 if (unlikely(skb->len > MAX_FRAME_SIZE)) {
472 dev->stats.tx_errors++;
476 /* Save skb pointer. */
477 greth->tx_skbuff[greth->tx_next] = skb;
481 status = GRETH_TXBD_MORE;
483 status |= GRETH_TXBD_CSALL;
484 status |= skb_headlen(skb) & GRETH_BD_LEN;
485 if (greth->tx_next == GRETH_TXBD_NUM_MASK)
486 status |= GRETH_BD_WR;
489 bdp = greth->tx_bd_base + greth->tx_next;
490 greth_write_bd(&bdp->stat, status);
491 dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
493 if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
496 greth_write_bd(&bdp->addr, dma_addr);
498 curr_tx = NEXT_TX(greth->tx_next);
501 for (i = 0; i < nr_frags; i++) {
502 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
503 greth->tx_skbuff[curr_tx] = NULL;
504 bdp = greth->tx_bd_base + curr_tx;
506 status = GRETH_TXBD_CSALL | GRETH_BD_EN;
507 status |= frag->size & GRETH_BD_LEN;
509 /* Wrap around descriptor ring */
510 if (curr_tx == GRETH_TXBD_NUM_MASK)
511 status |= GRETH_BD_WR;
513 /* More fragments left */
514 if (i < nr_frags - 1)
515 status |= GRETH_TXBD_MORE;
517 /* ... last fragment, check if out of descriptors */
518 else if (greth->tx_free - nr_frags - 1 < (MAX_SKB_FRAGS + 1)) {
520 /* Enable interrupts and stop queue */
521 status |= GRETH_BD_IE;
522 netif_stop_queue(dev);
525 greth_write_bd(&bdp->stat, status);
527 dma_addr = dma_map_page(greth->dev,
533 if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
536 greth_write_bd(&bdp->addr, dma_addr);
538 curr_tx = NEXT_TX(curr_tx);
543 /* Enable the descriptor chain by enabling the first descriptor */
544 bdp = greth->tx_bd_base + greth->tx_next;
545 greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
546 greth->tx_next = curr_tx;
547 greth->tx_free -= nr_frags + 1;
551 greth_enable_tx(greth);
556 /* Unmap SKB mappings that succeeded and disable descriptor */
for (i = 0; SKIP_TX(greth->tx_next, i) != curr_tx; i++) {
	bdp = greth->tx_bd_base + SKIP_TX(greth->tx_next, i);
559 dma_unmap_single(greth->dev,
560 greth_read_bd(&bdp->addr),
561 greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
563 greth_write_bd(&bdp->stat, 0);
567 dev_warn(greth->dev, "Could not create TX DMA mapping\n");
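/*
 * Interrupt handler: RX/TX events are only acknowledged here; the real work
 * is deferred to NAPI. Device interrupts stay masked until greth_poll() has
 * drained its budget and re-enables them.
 */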
574 static irqreturn_t greth_interrupt(int irq, void *dev_id)
576 struct net_device *dev = dev_id;
577 struct greth_private *greth;
579 irqreturn_t retval = IRQ_NONE;
581 greth = netdev_priv(dev);
583 spin_lock(&greth->devlock);
585 /* Get the interrupt events that caused us to be here. */
586 status = GRETH_REGLOAD(greth->regs->status);
588 /* Handle rx and tx interrupts through poll */
589 if (status & (GRETH_INT_RX | GRETH_INT_TX)) {
591 /* Clear interrupt status */
592 GRETH_REGORIN(greth->regs->status,
593 status & (GRETH_INT_RX | GRETH_INT_TX));
595 retval = IRQ_HANDLED;
597 /* Disable interrupts and schedule poll() */
598 greth_disable_irqs(greth);
599 napi_schedule(&greth->napi);
603 spin_unlock(&greth->devlock);
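/*
 * Reclaim 10/100 TX descriptors that the hardware has released (enable bit
 * cleared), update the error counters and wake the queue once descriptors
 * are available again.
 */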
608 static void greth_clean_tx(struct net_device *dev)
610 struct greth_private *greth;
611 struct greth_bd *bdp;
614 greth = netdev_priv(dev);
617 bdp = greth->tx_bd_base + greth->tx_last;
618 stat = greth_read_bd(&bdp->stat);
620 if (unlikely(stat & GRETH_BD_EN))
623 if (greth->tx_free == GRETH_TXBD_NUM)
626 /* Check status for errors */
627 if (unlikely(stat & GRETH_TXBD_STATUS)) {
628 dev->stats.tx_errors++;
629 if (stat & GRETH_TXBD_ERR_AL)
630 dev->stats.tx_aborted_errors++;
631 if (stat & GRETH_TXBD_ERR_UE)
632 dev->stats.tx_fifo_errors++;
634 dev->stats.tx_packets++;
635 greth->tx_last = NEXT_TX(greth->tx_last);
639 if (greth->tx_free > 0) {
640 netif_wake_queue(dev);
645 static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
647 /* Check status for errors */
648 if (unlikely(stat & GRETH_TXBD_STATUS)) {
649 dev->stats.tx_errors++;
650 if (stat & GRETH_TXBD_ERR_AL)
651 dev->stats.tx_aborted_errors++;
652 if (stat & GRETH_TXBD_ERR_UE)
653 dev->stats.tx_fifo_errors++;
654 if (stat & GRETH_TXBD_ERR_LC)
655 dev->stats.tx_aborted_errors++;
657 dev->stats.tx_packets++;
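/*
 * Gigabit TX reclaim: an skb is only freed once the descriptor of its last
 * fragment has been released by the hardware, and the completion status is
 * taken from that final descriptor.
 */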
660 static void greth_clean_tx_gbit(struct net_device *dev)
662 struct greth_private *greth;
663 struct greth_bd *bdp, *bdp_last_frag;
668 greth = netdev_priv(dev);
670 while (greth->tx_free < GRETH_TXBD_NUM) {
672 skb = greth->tx_skbuff[greth->tx_last];
674 nr_frags = skb_shinfo(skb)->nr_frags;
676 /* We only clean fully completed SKBs */
677 bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
stat = greth_read_bd(&bdp_last_frag->stat);
680 if (stat & GRETH_BD_EN)
683 greth->tx_skbuff[greth->tx_last] = NULL;
685 greth_update_tx_stats(dev, stat);
687 bdp = greth->tx_bd_base + greth->tx_last;
689 greth->tx_last = NEXT_TX(greth->tx_last);
691 dma_unmap_single(greth->dev,
692 greth_read_bd(&bdp->addr),
696 for (i = 0; i < nr_frags; i++) {
697 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
698 bdp = greth->tx_bd_base + greth->tx_last;
700 dma_unmap_page(greth->dev,
701 greth_read_bd(&bdp->addr),
705 greth->tx_last = NEXT_TX(greth->tx_last);
707 greth->tx_free += nr_frags+1;
710 if (greth->tx_free > (MAX_SKB_FRAGS + 1)) {
711 netif_wake_queue(dev);
715 static int greth_pending_packets(struct greth_private *greth)
717 struct greth_bd *bdp;
719 bdp = greth->rx_bd_base + greth->rx_cur;
720 status = greth_read_bd(&bdp->stat);
721 if (status & GRETH_BD_EN)
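/*
 * 10/100 receive path: each received frame is copied out of the fixed DMA
 * buffer into a freshly allocated skb, after which the descriptor is
 * re-armed and handed back to the MAC.
 */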
727 static int greth_rx(struct net_device *dev, int limit)
729 struct greth_private *greth;
730 struct greth_bd *bdp;
734 u32 status, dma_addr;
736 greth = netdev_priv(dev);
738 for (count = 0; count < limit; ++count) {
740 bdp = greth->rx_bd_base + greth->rx_cur;
741 status = greth_read_bd(&bdp->stat);
742 dma_addr = greth_read_bd(&bdp->addr);
745 if (unlikely(status & GRETH_BD_EN)) {
749 /* Check status for errors. */
750 if (unlikely(status & GRETH_RXBD_STATUS)) {
751 if (status & GRETH_RXBD_ERR_FT) {
752 dev->stats.rx_length_errors++;
755 if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
756 dev->stats.rx_frame_errors++;
759 if (status & GRETH_RXBD_ERR_CRC) {
760 dev->stats.rx_crc_errors++;
765 dev->stats.rx_errors++;
769 pkt_len = status & GRETH_BD_LEN;
771 skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
773 if (unlikely(skb == NULL)) {
776 dev_warn(&dev->dev, "low on memory - " "packet dropped\n");
778 dev->stats.rx_dropped++;
781 skb_reserve(skb, NET_IP_ALIGN);
784 dma_sync_single_for_cpu(greth->dev,
789 if (netif_msg_pktdata(greth))
790 greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);
792 memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);
794 skb->protocol = eth_type_trans(skb, dev);
795 dev->stats.rx_packets++;
796 netif_receive_skb(skb);
800 status = GRETH_BD_EN | GRETH_BD_IE;
801 if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
802 status |= GRETH_BD_WR;
806 greth_write_bd(&bdp->stat, status);
808 dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);
810 greth_enable_rx(greth);
812 greth->rx_cur = NEXT_RX(greth->rx_cur);
818 static inline int hw_checksummed(u32 status)
821 if (status & GRETH_RXBD_IP_FRAG)
824 if (status & GRETH_RXBD_IP && status & GRETH_RXBD_IP_CSERR)
827 if (status & GRETH_RXBD_UDP && status & GRETH_RXBD_UDP_CSERR)
830 if (status & GRETH_RXBD_TCP && status & GRETH_RXBD_TCP_CSERR)
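/*
 * Gigabit receive path: the filled skb is passed straight to the stack and
 * replaced with a newly allocated, DMA-mapped one. If the frame was bad, or
 * a replacement cannot be allocated or mapped, the old skb is reused and
 * the frame counted as dropped.
 */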
836 static int greth_rx_gbit(struct net_device *dev, int limit)
838 struct greth_private *greth;
839 struct greth_bd *bdp;
840 struct sk_buff *skb, *newskb;
843 u32 status, dma_addr;
845 greth = netdev_priv(dev);
847 for (count = 0; count < limit; ++count) {
849 bdp = greth->rx_bd_base + greth->rx_cur;
850 skb = greth->rx_skbuff[greth->rx_cur];
851 status = greth_read_bd(&bdp->stat);
854 if (status & GRETH_BD_EN)
857 /* Check status for errors. */
858 if (unlikely(status & GRETH_RXBD_STATUS)) {
860 if (status & GRETH_RXBD_ERR_FT) {
861 dev->stats.rx_length_errors++;
864 (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
865 dev->stats.rx_frame_errors++;
867 } else if (status & GRETH_RXBD_ERR_CRC) {
868 dev->stats.rx_crc_errors++;
873 /* Allocate new skb to replace current, not needed if the
874 * current skb can be reused */
if (!bad && (newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
876 skb_reserve(newskb, NET_IP_ALIGN);
878 dma_addr = dma_map_single(greth->dev,
880 MAX_FRAME_SIZE + NET_IP_ALIGN,
883 if (!dma_mapping_error(greth->dev, dma_addr)) {
884 /* Process the incoming frame. */
885 pkt_len = status & GRETH_BD_LEN;
887 dma_unmap_single(greth->dev,
888 greth_read_bd(&bdp->addr),
889 MAX_FRAME_SIZE + NET_IP_ALIGN,
892 if (netif_msg_pktdata(greth))
893 greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)), pkt_len);
895 skb_put(skb, pkt_len);
if ((greth->flags & GRETH_FLAG_RX_CSUM) && hw_checksummed(status))
898 skb->ip_summed = CHECKSUM_UNNECESSARY;
900 skb_checksum_none_assert(skb);
902 skb->protocol = eth_type_trans(skb, dev);
903 dev->stats.rx_packets++;
904 netif_receive_skb(skb);
906 greth->rx_skbuff[greth->rx_cur] = newskb;
907 greth_write_bd(&bdp->addr, dma_addr);
910 dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
911 dev_kfree_skb(newskb);
912 /* reusing current skb, so it is a drop */
913 dev->stats.rx_dropped++;
916 /* Bad Frame transfer, the skb is reused */
917 dev->stats.rx_dropped++;
* Failed to allocate a new skb: reuse the current,
* already-filled skb as if the transfer had failed.
* Arguably the RX descriptor handling should be split
* into separate cleaning and refilling stages, like
* the TX side of the driver.
926 dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
927 /* reusing current skb, so it is a drop */
928 dev->stats.rx_dropped++;
931 status = GRETH_BD_EN | GRETH_BD_IE;
932 if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
933 status |= GRETH_BD_WR;
937 greth_write_bd(&bdp->stat, status);
938 greth_enable_rx(greth);
939 greth->rx_cur = NEXT_RX(greth->rx_cur);
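/*
 * NAPI poll: TX reclaim runs first, then RX up to the remaining budget.
 * If the budget is not exhausted, interrupts are re-enabled and the RX ring
 * is checked once more to close the race against frames that arrived in
 * between (rescheduling the poll if any are pending).
 */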
946 static int greth_poll(struct napi_struct *napi, int budget)
948 struct greth_private *greth;
950 greth = container_of(napi, struct greth_private, napi);
952 if (greth->gbit_mac) {
953 greth_clean_tx_gbit(greth->netdev);
955 greth_clean_tx(greth->netdev);
959 if (greth->gbit_mac) {
960 work_done += greth_rx_gbit(greth->netdev, budget - work_done);
962 work_done += greth_rx(greth->netdev, budget - work_done);
965 if (work_done < budget) {
969 if (greth_pending_packets(greth)) {
970 napi_reschedule(napi);
975 greth_enable_irqs(greth);
979 static int greth_set_mac_add(struct net_device *dev, void *p)
981 struct sockaddr *addr = p;
982 struct greth_private *greth;
983 struct greth_regs *regs;
985 greth = netdev_priv(dev);
986 regs = (struct greth_regs *) greth->regs;
988 if (!is_valid_ether_addr(addr->sa_data))
991 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
993 GRETH_REGSAVE(regs->esa_msb, addr->sa_data[0] << 8 | addr->sa_data[1]);
GRETH_REGSAVE(regs->esa_lsb,
	      addr->sa_data[2] << 24 | addr->sa_data[3] << 16 |
	      addr->sa_data[4] << 8  | addr->sa_data[5]);
1000 static u32 greth_hash_get_index(__u8 *addr)
1002 return (ether_crc(6, addr)) & 0x3F;
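/*
 * The multicast filter is a 64-bit hash: the low six bits of the CRC-32 of
 * the address select one bit, spread across the 32-bit hash_msb/hash_lsb
 * register pair.
 */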
1005 static void greth_set_hash_filter(struct net_device *dev)
1007 struct netdev_hw_addr *ha;
1008 struct greth_private *greth = netdev_priv(dev);
1009 struct greth_regs *regs = (struct greth_regs *) greth->regs;
1013 mc_filter[0] = mc_filter[1] = 0;
1015 netdev_for_each_mc_addr(ha, dev) {
1016 bitnr = greth_hash_get_index(ha->addr);
1017 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
1020 GRETH_REGSAVE(regs->hash_msb, mc_filter[1]);
1021 GRETH_REGSAVE(regs->hash_lsb, mc_filter[0]);
1024 static void greth_set_multicast_list(struct net_device *dev)
1027 struct greth_private *greth = netdev_priv(dev);
1028 struct greth_regs *regs = (struct greth_regs *) greth->regs;
1030 cfg = GRETH_REGLOAD(regs->control);
1031 if (dev->flags & IFF_PROMISC)
1032 cfg |= GRETH_CTRL_PR;
1034 cfg &= ~GRETH_CTRL_PR;
1036 if (greth->multicast) {
1037 if (dev->flags & IFF_ALLMULTI) {
1038 GRETH_REGSAVE(regs->hash_msb, -1);
1039 GRETH_REGSAVE(regs->hash_lsb, -1);
1040 cfg |= GRETH_CTRL_MCEN;
1041 GRETH_REGSAVE(regs->control, cfg);
1045 if (netdev_mc_empty(dev)) {
1046 cfg &= ~GRETH_CTRL_MCEN;
1047 GRETH_REGSAVE(regs->control, cfg);
1051 /* Setup multicast filter */
1052 greth_set_hash_filter(dev);
1053 cfg |= GRETH_CTRL_MCEN;
1055 GRETH_REGSAVE(regs->control, cfg);
1058 static u32 greth_get_msglevel(struct net_device *dev)
1060 struct greth_private *greth = netdev_priv(dev);
1061 return greth->msg_enable;
1064 static void greth_set_msglevel(struct net_device *dev, u32 value)
1066 struct greth_private *greth = netdev_priv(dev);
1067 greth->msg_enable = value;
1069 static int greth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1071 struct greth_private *greth = netdev_priv(dev);
1072 struct phy_device *phy = greth->phy;
1077 return phy_ethtool_gset(phy, cmd);
1080 static int greth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1082 struct greth_private *greth = netdev_priv(dev);
1083 struct phy_device *phy = greth->phy;
1088 return phy_ethtool_sset(phy, cmd);
1091 static int greth_get_regs_len(struct net_device *dev)
1093 return sizeof(struct greth_regs);
1096 static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1098 struct greth_private *greth = netdev_priv(dev);
strlcpy(info->driver, dev_driver_string(greth->dev), sizeof(info->driver));
strlcpy(info->version, "revision: 1.0", sizeof(info->version));
strlcpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
1104 info->eedump_len = 0;
1105 info->regdump_len = sizeof(struct greth_regs);
1108 static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
1111 struct greth_private *greth = netdev_priv(dev);
1112 u32 __iomem *greth_regs = (u32 __iomem *) greth->regs;
1115 for (i = 0; i < sizeof(struct greth_regs) / sizeof(u32); i++)
1116 buff[i] = greth_read_bd(&greth_regs[i]);
1119 static u32 greth_get_rx_csum(struct net_device *dev)
1121 struct greth_private *greth = netdev_priv(dev);
1122 return (greth->flags & GRETH_FLAG_RX_CSUM) != 0;
1125 static int greth_set_rx_csum(struct net_device *dev, u32 data)
1127 struct greth_private *greth = netdev_priv(dev);
1129 spin_lock_bh(&greth->devlock);
1132 greth->flags |= GRETH_FLAG_RX_CSUM;
1134 greth->flags &= ~GRETH_FLAG_RX_CSUM;
1136 spin_unlock_bh(&greth->devlock);
1141 static u32 greth_get_tx_csum(struct net_device *dev)
1143 return (dev->features & NETIF_F_IP_CSUM) != 0;
1146 static int greth_set_tx_csum(struct net_device *dev, u32 data)
1148 netif_tx_lock_bh(dev);
1149 ethtool_op_set_tx_csum(dev, data);
1150 netif_tx_unlock_bh(dev);
1154 static const struct ethtool_ops greth_ethtool_ops = {
1155 .get_msglevel = greth_get_msglevel,
1156 .set_msglevel = greth_set_msglevel,
1157 .get_settings = greth_get_settings,
1158 .set_settings = greth_set_settings,
1159 .get_drvinfo = greth_get_drvinfo,
1160 .get_regs_len = greth_get_regs_len,
1161 .get_regs = greth_get_regs,
1162 .get_rx_csum = greth_get_rx_csum,
1163 .set_rx_csum = greth_set_rx_csum,
1164 .get_tx_csum = greth_get_tx_csum,
1165 .set_tx_csum = greth_set_tx_csum,
1166 .get_link = ethtool_op_get_link,
1169 static struct net_device_ops greth_netdev_ops = {
1170 .ndo_open = greth_open,
1171 .ndo_stop = greth_close,
1172 .ndo_start_xmit = greth_start_xmit,
1173 .ndo_set_mac_address = greth_set_mac_add,
1174 .ndo_validate_addr = eth_validate_addr,
1177 static inline int wait_for_mdio(struct greth_private *greth)
1179 unsigned long timeout = jiffies + 4*HZ/100;
1180 while (GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_BUSY) {
1181 if (time_after(jiffies, timeout))
1187 static int greth_mdio_read(struct mii_bus *bus, int phy, int reg)
1189 struct greth_private *greth = bus->priv;
1192 if (!wait_for_mdio(greth))
1195 GRETH_REGSAVE(greth->regs->mdio, ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 2);
1197 if (!wait_for_mdio(greth))
1200 if (!(GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_NVALID)) {
1201 data = (GRETH_REGLOAD(greth->regs->mdio) >> 16) & 0xFFFF;
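/*
 * MDIO register layout used above and below: data in bits 31:16, PHY
 * address in bits 15:11, register number in bits 10:6, bit 1 triggers a
 * read and bit 0 a write; NVALID set means the read data is not valid.
 */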
1209 static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
1211 struct greth_private *greth = bus->priv;
1213 if (!wait_for_mdio(greth))
1216 GRETH_REGSAVE(greth->regs->mdio,
1217 ((val & 0xFFFF) << 16) | ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 1);
1219 if (!wait_for_mdio(greth))
1225 static int greth_mdio_reset(struct mii_bus *bus)
1230 static void greth_link_change(struct net_device *dev)
1232 struct greth_private *greth = netdev_priv(dev);
1233 struct phy_device *phydev = greth->phy;
1234 unsigned long flags;
1235 int status_change = 0;
1238 spin_lock_irqsave(&greth->devlock, flags);
1242 if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
1243 ctrl = GRETH_REGLOAD(greth->regs->control) &
1244 ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB);
1247 ctrl |= GRETH_CTRL_FD;
1249 if (phydev->speed == SPEED_100)
1250 ctrl |= GRETH_CTRL_SP;
1251 else if (phydev->speed == SPEED_1000)
1252 ctrl |= GRETH_CTRL_GB;
1254 GRETH_REGSAVE(greth->regs->control, ctrl);
1255 greth->speed = phydev->speed;
1256 greth->duplex = phydev->duplex;
1261 if (phydev->link != greth->link) {
1262 if (!phydev->link) {
1266 greth->link = phydev->link;
1271 spin_unlock_irqrestore(&greth->devlock, flags);
1273 if (status_change) {
1275 pr_debug("%s: link up (%d/%s)\n",
1276 dev->name, phydev->speed,
1277 DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
1279 pr_debug("%s: link down\n", dev->name);
1283 static int greth_mdio_probe(struct net_device *dev)
1285 struct greth_private *greth = netdev_priv(dev);
1286 struct phy_device *phy = NULL;
1289 /* Find the first PHY */
1290 phy = phy_find_first(greth->mdio);
1293 if (netif_msg_probe(greth))
1294 dev_err(&dev->dev, "no PHY found\n");
1298 ret = phy_connect_direct(dev, phy, &greth_link_change,
1299 0, greth->gbit_mac ?
1300 PHY_INTERFACE_MODE_GMII :
1301 PHY_INTERFACE_MODE_MII);
1303 if (netif_msg_ifup(greth))
1304 dev_err(&dev->dev, "could not attach to PHY\n");
1308 if (greth->gbit_mac)
1309 phy->supported &= PHY_GBIT_FEATURES;
1311 phy->supported &= PHY_BASIC_FEATURES;
1313 phy->advertising = phy->supported;
1323 static inline int phy_aneg_done(struct phy_device *phydev)
1327 retval = phy_read(phydev, MII_BMSR);
1329 return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
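/*
 * Register a polled MDIO bus (one per controller, named after its IRQ) and
 * probe the PHY. When the EDCL debug link is in use, autonegotiation is
 * started immediately, presumably so the debug link remains usable before
 * the interface is opened.
 */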
1332 static int greth_mdio_init(struct greth_private *greth)
1335 unsigned long timeout;
1337 greth->mdio = mdiobus_alloc();
1342 greth->mdio->name = "greth-mdio";
1343 snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq);
1344 greth->mdio->read = greth_mdio_read;
1345 greth->mdio->write = greth_mdio_write;
1346 greth->mdio->reset = greth_mdio_reset;
1347 greth->mdio->priv = greth;
1349 greth->mdio->irq = greth->mdio_irqs;
1351 for (phy = 0; phy < PHY_MAX_ADDR; phy++)
1352 greth->mdio->irq[phy] = PHY_POLL;
1354 ret = mdiobus_register(greth->mdio);
1359 ret = greth_mdio_probe(greth->netdev);
1361 if (netif_msg_probe(greth))
1362 dev_err(&greth->netdev->dev, "failed to probe MDIO bus\n");
1366 phy_start(greth->phy);
/* If the Ethernet debug link (EDCL) is used, make autonegotiation happen right away */
1369 if (greth->edcl && greth_edcl == 1) {
1370 phy_start_aneg(greth->phy);
1371 timeout = jiffies + 6*HZ;
1372 while (!phy_aneg_done(greth->phy) && time_before(jiffies, timeout)) {
1374 genphy_read_status(greth->phy);
1375 greth_link_change(greth->netdev);
1381 mdiobus_unregister(greth->mdio);
1383 mdiobus_free(greth->mdio);
1387 /* Initialize the GRETH MAC */
1388 static int __devinit greth_of_probe(struct platform_device *ofdev, const struct of_device_id *match)
1390 struct net_device *dev;
1391 struct greth_private *greth;
1392 struct greth_regs *regs;
1397 unsigned long timeout;
1399 dev = alloc_etherdev(sizeof(struct greth_private));
1404 greth = netdev_priv(dev);
1405 greth->netdev = dev;
1406 greth->dev = &ofdev->dev;
1408 if (greth_debug > 0)
1409 greth->msg_enable = greth_debug;
1411 greth->msg_enable = GRETH_DEF_MSG_ENABLE;
1413 spin_lock_init(&greth->devlock);
1415 greth->regs = of_ioremap(&ofdev->resource[0], 0,
1416 resource_size(&ofdev->resource[0]),
1417 "grlib-greth regs");
1419 if (greth->regs == NULL) {
1420 if (netif_msg_probe(greth))
1421 dev_err(greth->dev, "ioremap failure.\n");
1426 regs = (struct greth_regs *) greth->regs;
1427 greth->irq = ofdev->archdata.irqs[0];
1429 dev_set_drvdata(greth->dev, dev);
1430 SET_NETDEV_DEV(dev, greth->dev);
1432 if (netif_msg_probe(greth))
1433 dev_dbg(greth->dev, "reseting controller.\n");
1435 /* Reset the controller. */
1436 GRETH_REGSAVE(regs->control, GRETH_RESET);
1438 /* Wait for MAC to reset itself */
1439 timeout = jiffies + HZ/100;
1440 while (GRETH_REGLOAD(regs->control) & GRETH_RESET) {
1441 if (time_after(jiffies, timeout)) {
1443 if (netif_msg_probe(greth))
1444 dev_err(greth->dev, "timeout when waiting for reset.\n");
1449 /* Get default PHY address */
1450 greth->phyaddr = (GRETH_REGLOAD(regs->mdio) >> 11) & 0x1F;
1452 /* Check if we have GBIT capable MAC */
1453 tmp = GRETH_REGLOAD(regs->control);
1454 greth->gbit_mac = (tmp >> 27) & 1;
1456 /* Check for multicast capability */
1457 greth->multicast = (tmp >> 25) & 1;
1459 greth->edcl = (tmp >> 31) & 1;
/* If we have EDCL, disable the EDCL speed/duplex FSM so that
 * it does not interfere with the software */
1463 if (greth->edcl != 0)
1464 GRETH_REGORIN(regs->control, GRETH_CTRL_DISDUPLEX);
1466 /* Check if MAC can handle MDIO interrupts */
1467 greth->mdio_int_en = (tmp >> 26) & 1;
1469 err = greth_mdio_init(greth);
1471 if (netif_msg_probe(greth))
1472 dev_err(greth->dev, "failed to register MDIO bus\n");
1476 /* Allocate TX descriptor ring in coherent memory */
1477 greth->tx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
1479 &greth->tx_bd_base_phys,
1482 if (!greth->tx_bd_base) {
1483 if (netif_msg_probe(greth))
1484 dev_err(&dev->dev, "could not allocate descriptor memory.\n");
1489 memset(greth->tx_bd_base, 0, 1024);
1491 /* Allocate RX descriptor ring in coherent memory */
1492 greth->rx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
1494 &greth->rx_bd_base_phys,
1497 if (!greth->rx_bd_base) {
1498 if (netif_msg_probe(greth))
1499 dev_err(greth->dev, "could not allocate descriptor memory.\n");
1504 memset(greth->rx_bd_base, 0, 1024);
1506 /* Get MAC address from: module param, OF property or ID prom */
1507 for (i = 0; i < 6; i++) {
1508 if (macaddr[i] != 0)
1512 const unsigned char *addr;
1514 addr = of_get_property(ofdev->dev.of_node, "local-mac-address",
1516 if (addr != NULL && len == 6) {
1517 for (i = 0; i < 6; i++)
1518 macaddr[i] = (unsigned int) addr[i];
1521 for (i = 0; i < 6; i++)
1522 macaddr[i] = (unsigned int) idprom->id_ethaddr[i];
1527 for (i = 0; i < 6; i++)
1528 dev->dev_addr[i] = macaddr[i];
1532 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
1533 if (netif_msg_probe(greth))
1534 dev_err(greth->dev, "no valid ethernet address, aborting.\n");
1539 GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
1540 GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
1541 dev->dev_addr[4] << 8 | dev->dev_addr[5]);
1543 /* Clear all pending interrupts except PHY irq */
1544 GRETH_REGSAVE(regs->status, 0xFF);
1546 if (greth->gbit_mac) {
1547 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HIGHDMA;
1548 greth_netdev_ops.ndo_start_xmit = greth_start_xmit_gbit;
1549 greth->flags = GRETH_FLAG_RX_CSUM;
1552 if (greth->multicast) {
1553 greth_netdev_ops.ndo_set_multicast_list = greth_set_multicast_list;
1554 dev->flags |= IFF_MULTICAST;
1556 dev->flags &= ~IFF_MULTICAST;
1559 dev->netdev_ops = &greth_netdev_ops;
1560 dev->ethtool_ops = &greth_ethtool_ops;
1562 err = register_netdev(dev);
1564 if (netif_msg_probe(greth))
1565 dev_err(greth->dev, "netdevice registration failed.\n");
1570 netif_napi_add(dev, &greth->napi, greth_poll, 64);
1575 dma_free_coherent(greth->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
1577 dma_free_coherent(greth->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
1579 mdiobus_unregister(greth->mdio);
1581 of_iounmap(&ofdev->resource[0], greth->regs, resource_size(&ofdev->resource[0]));
1587 static int __devexit greth_of_remove(struct platform_device *of_dev)
1589 struct net_device *ndev = dev_get_drvdata(&of_dev->dev);
1590 struct greth_private *greth = netdev_priv(ndev);
1592 /* Free descriptor areas */
1593 dma_free_coherent(&of_dev->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
1595 dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
1597 dev_set_drvdata(&of_dev->dev, NULL);
1600 phy_stop(greth->phy);
1601 mdiobus_unregister(greth->mdio);
1603 unregister_netdev(ndev);
1606 of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));
1611 static struct of_device_id greth_of_match[] = {
1613 .name = "GAISLER_ETHMAC",
1621 MODULE_DEVICE_TABLE(of, greth_of_match);
1623 static struct of_platform_driver greth_of_driver = {
1625 .name = "grlib-greth",
1626 .owner = THIS_MODULE,
1627 .of_match_table = greth_of_match,
1629 .probe = greth_of_probe,
1630 .remove = __devexit_p(greth_of_remove),
1633 static int __init greth_init(void)
1635 return of_register_platform_driver(&greth_of_driver);
1638 static void __exit greth_cleanup(void)
1640 of_unregister_platform_driver(&greth_of_driver);
1643 module_init(greth_init);
1644 module_exit(greth_cleanup);
1646 MODULE_AUTHOR("Aeroflex Gaisler AB.");
1647 MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
1648 MODULE_LICENSE("GPL");