/*
 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of_platform.h>

#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#include "fs_enet.h"

/*************************************************/

MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
module_param(fs_enet_debug, int, 0);
MODULE_PARM_DESC(fs_enet_debug,
                 "Freescale bitmapped debugging message enable value");

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev);
#endif

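/*
 * The MAC-type specifics (FEC, FCC or SCC) live behind the fep->ops
 * vector; most of the netdev entry points below are thin wrappers
 * that dispatch through it.
 */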
static void fs_set_multicast_list(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        (*fep->ops->set_multicast_list)(dev);
}

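/*
 * Advance skb->data to the next 'align'-byte boundary (align must be
 * a power of two).  Receive buffers are DMA targets, so the data area
 * needs to start on a cache-line boundary.
 */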
static void skb_align(struct sk_buff *skb, int align)
{
        int off = ((unsigned long)skb->data) & (align - 1);

        if (off)
                skb_reserve(skb, align - off);
}

/* NAPI receive function */
static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
{
        struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
        struct net_device *dev = fep->ndev;
        const struct fs_platform_info *fpi = fep->fpi;
        cbd_t __iomem *bdp;
        struct sk_buff *skb, *skbn, *skbt;
        int received = 0;
        u16 pkt_len, sc;
        int curidx;

        /*
         * First, grab all of the stats for the incoming packet.
         * These get messed up if we get called due to a busy condition.
         */
        bdp = fep->cur_rx;

        /* clear RX status bits for napi */
        (*fep->ops->napi_clear_rx_event)(dev);

        while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
                curidx = bdp - fep->rx_bd_base;

                /*
                 * Since we have allocated space to hold a complete frame,
                 * the last indicator should be set.
                 */
                if ((sc & BD_ENET_RX_LAST) == 0)
                        printk(KERN_WARNING DRV_MODULE_NAME
                               ": %s rcv is not +last\n",
                               dev->name);

                /*
                 * Check for errors.
                 */
                if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
                          BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
                        fep->stats.rx_errors++;
                        /* Frame too long or too short. */
                        if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
                                fep->stats.rx_length_errors++;
                        /* Frame alignment */
                        if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
                                fep->stats.rx_frame_errors++;
                        /* CRC Error */
                        if (sc & BD_ENET_RX_CR)
                                fep->stats.rx_crc_errors++;
                        /* FIFO overrun */
                        if (sc & BD_ENET_RX_OV)
                                fep->stats.rx_over_errors++;

                        skb = fep->rx_skbuff[curidx];

                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                DMA_FROM_DEVICE);

                        skbn = skb;

                } else {
                        skb = fep->rx_skbuff[curidx];

                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                DMA_FROM_DEVICE);

                        /*
                         * Process the incoming frame.
                         */
                        fep->stats.rx_packets++;
                        pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
                        fep->stats.rx_bytes += pkt_len + 4;

                        if (pkt_len <= fpi->rx_copybreak) {
                                /* +2 to make IP header L1 cache aligned */
                                skbn = dev_alloc_skb(pkt_len + 2);
                                if (skbn != NULL) {
                                        skb_reserve(skbn, 2);   /* align IP header */
                                        skb_copy_from_linear_data(skb,
                                                      skbn->data, pkt_len);
                                        /* swap */
                                        skbt = skb;
                                        skb = skbn;
                                        skbn = skbt;
                                }
                        } else {
                                skbn = dev_alloc_skb(ENET_RX_FRSIZE);

                                if (skbn)
                                        skb_align(skbn, ENET_RX_ALIGN);
                        }

                        if (skbn != NULL) {
                                skb_put(skb, pkt_len);  /* Make room */
                                skb->protocol = eth_type_trans(skb, dev);
                                received++;
                                netif_receive_skb(skb);
                        } else {
                                printk(KERN_WARNING DRV_MODULE_NAME
                                       ": %s Memory squeeze, dropping packet.\n",
                                       dev->name);
                                fep->stats.rx_dropped++;
                                skbn = skb;
                        }
                }

                fep->rx_skbuff[curidx] = skbn;
                CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
                             L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                             DMA_FROM_DEVICE));
                CBDW_DATLEN(bdp, 0);
                CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

                /*
                 * Update BD pointer to next entry.
                 */
                if ((sc & BD_ENET_RX_WRAP) == 0)
                        bdp++;
                else
                        bdp = fep->rx_bd_base;

                (*fep->ops->rx_bd_done)(dev);

                if (received >= budget)
                        break;
        }

        fep->cur_rx = bdp;

        if (received < budget) {
                /* done */
                netif_rx_complete(dev, napi);
                (*fep->ops->napi_enable_rx)(dev);
        }
        return received;
}

/* non NAPI receive function */
static int fs_enet_rx_non_napi(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        const struct fs_platform_info *fpi = fep->fpi;
        cbd_t __iomem *bdp;
        struct sk_buff *skb, *skbn, *skbt;
        int received = 0;
        u16 pkt_len, sc;
        int curidx;
        /*
         * First, grab all of the stats for the incoming packet.
         * These get messed up if we get called due to a busy condition.
         */
        bdp = fep->cur_rx;

        while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {

                curidx = bdp - fep->rx_bd_base;

                /*
                 * Since we have allocated space to hold a complete frame,
                 * the last indicator should be set.
                 */
                if ((sc & BD_ENET_RX_LAST) == 0)
                        printk(KERN_WARNING DRV_MODULE_NAME
                               ": %s rcv is not +last\n",
                               dev->name);

                /*
                 * Check for errors.
                 */
                if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
                          BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
                        fep->stats.rx_errors++;
                        /* Frame too long or too short. */
                        if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
                                fep->stats.rx_length_errors++;
                        /* Frame alignment */
                        if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
                                fep->stats.rx_frame_errors++;
                        /* CRC Error */
                        if (sc & BD_ENET_RX_CR)
                                fep->stats.rx_crc_errors++;
                        /* FIFO overrun */
                        if (sc & BD_ENET_RX_OV)
                                fep->stats.rx_over_errors++;

                        skb = fep->rx_skbuff[curidx];

                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                DMA_FROM_DEVICE);

                        skbn = skb;

                } else {

                        skb = fep->rx_skbuff[curidx];

                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                DMA_FROM_DEVICE);

                        /*
                         * Process the incoming frame.
                         */
                        fep->stats.rx_packets++;
                        pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
                        fep->stats.rx_bytes += pkt_len + 4;

                        if (pkt_len <= fpi->rx_copybreak) {
                                /* +2 to make IP header L1 cache aligned */
                                skbn = dev_alloc_skb(pkt_len + 2);
                                if (skbn != NULL) {
                                        skb_reserve(skbn, 2);   /* align IP header */
                                        skb_copy_from_linear_data(skb,
                                                      skbn->data, pkt_len);
                                        /* swap */
                                        skbt = skb;
                                        skb = skbn;
                                        skbn = skbt;
                                }
                        } else {
                                skbn = dev_alloc_skb(ENET_RX_FRSIZE);

                                if (skbn)
                                        skb_align(skbn, ENET_RX_ALIGN);
                        }

                        if (skbn != NULL) {
                                skb_put(skb, pkt_len);  /* Make room */
                                skb->protocol = eth_type_trans(skb, dev);
                                received++;
                                netif_rx(skb);
                        } else {
                                printk(KERN_WARNING DRV_MODULE_NAME
                                       ": %s Memory squeeze, dropping packet.\n",
                                       dev->name);
                                fep->stats.rx_dropped++;
                                skbn = skb;
                        }
                }

                fep->rx_skbuff[curidx] = skbn;
                CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
                             L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                             DMA_FROM_DEVICE));
                CBDW_DATLEN(bdp, 0);
                CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

                /*
                 * Update BD pointer to next entry.
                 */
                if ((sc & BD_ENET_RX_WRAP) == 0)
                        bdp++;
                else
                        bdp = fep->rx_bd_base;

                (*fep->ops->rx_bd_done)(dev);
        }

        fep->cur_rx = bdp;

        return 0;
}

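/*
 * TX completion: reclaim descriptors the controller has finished
 * with, account errors, free the skbs, and wake the queue if it was
 * stopped on a full ring.  Called from the interrupt handler; takes
 * fep->tx_lock.
 */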
static void fs_enet_tx(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        cbd_t __iomem *bdp;
        struct sk_buff *skb;
        int dirtyidx, do_wake, do_restart;
        u16 sc;

        spin_lock(&fep->tx_lock);
        bdp = fep->dirty_tx;

        do_wake = do_restart = 0;
        while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
                dirtyidx = bdp - fep->tx_bd_base;

                if (fep->tx_free == fep->tx_ring)
                        break;

                skb = fep->tx_skbuff[dirtyidx];

                /*
                 * Check for errors.
                 */
                if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
                          BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

                        if (sc & BD_ENET_TX_HB) /* No heartbeat */
                                fep->stats.tx_heartbeat_errors++;
                        if (sc & BD_ENET_TX_LC) /* Late collision */
                                fep->stats.tx_window_errors++;
                        if (sc & BD_ENET_TX_RL) /* Retrans limit */
                                fep->stats.tx_aborted_errors++;
                        if (sc & BD_ENET_TX_UN) /* Underrun */
                                fep->stats.tx_fifo_errors++;
                        if (sc & BD_ENET_TX_CSL)        /* Carrier lost */
                                fep->stats.tx_carrier_errors++;

                        if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
                                fep->stats.tx_errors++;
                                do_restart = 1;
                        }
                } else
                        fep->stats.tx_packets++;

                if (sc & BD_ENET_TX_READY)
                        printk(KERN_WARNING DRV_MODULE_NAME
                               ": %s HEY! Enet xmit interrupt and TX_READY.\n",
                               dev->name);

                /*
                 * Deferred means some collisions occurred during transmit,
                 * but we eventually sent the packet OK.
                 */
                if (sc & BD_ENET_TX_DEF)
                        fep->stats.collisions++;

                /* unmap */
                dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                skb->len, DMA_TO_DEVICE);

                /*
                 * Free the sk buffer associated with this last transmit.
                 */
                dev_kfree_skb_irq(skb);
                fep->tx_skbuff[dirtyidx] = NULL;

                /*
                 * Update pointer to next buffer descriptor to be transmitted.
                 */
                if ((sc & BD_ENET_TX_WRAP) == 0)
                        bdp++;
                else
                        bdp = fep->tx_bd_base;

                /*
                 * Since we have freed up a buffer, the ring is no longer
                 * full.
                 */
                if (!fep->tx_free++)
                        do_wake = 1;
        }

        fep->dirty_tx = bdp;

        if (do_restart)
                (*fep->ops->tx_restart)(dev);

        spin_unlock(&fep->tx_lock);

        if (do_wake)
                netif_wake_queue(dev);
}

/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct fs_enet_private *fep;
        const struct fs_platform_info *fpi;
        u32 int_events;
        u32 int_clr_events;
        int nr, napi_ok;
        int handled;

        fep = netdev_priv(dev);
        fpi = fep->fpi;

        nr = 0;
        while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
                nr++;

                int_clr_events = int_events;
                if (fpi->use_napi)
                        int_clr_events &= ~fep->ev_napi_rx;

                (*fep->ops->clear_int_events)(dev, int_clr_events);

                if (int_events & fep->ev_err)
                        (*fep->ops->ev_error)(dev, int_events);

                if (int_events & fep->ev_rx) {
                        if (!fpi->use_napi)
                                fs_enet_rx_non_napi(dev);
                        else {
                                napi_ok = napi_schedule_prep(&fep->napi);

                                (*fep->ops->napi_disable_rx)(dev);
                                (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

                                /* NOTE: it is possible for FCCs in NAPI mode    */
                                /* to submit a spurious interrupt while in poll  */
                                if (napi_ok)
                                        __netif_rx_schedule(dev, &fep->napi);
                        }
                }

                if (int_events & fep->ev_tx)
                        fs_enet_tx(dev);
        }

        handled = nr > 0;
        return IRQ_RETVAL(handled);
}

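/*
 * (Re)build the RX and TX buffer descriptor rings: every RX slot gets
 * a freshly mapped skb and is marked empty, every TX slot is cleared,
 * and the last descriptor of each ring carries the WRAP bit.
 */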
void fs_init_bds(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        cbd_t __iomem *bdp;
        struct sk_buff *skb;
        int i;

        fs_cleanup_bds(dev);

        fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
        fep->tx_free = fep->tx_ring;
        fep->cur_rx = fep->rx_bd_base;

        /*
         * Initialize the receive buffer descriptors.
         */
        for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
                skb = dev_alloc_skb(ENET_RX_FRSIZE);
                if (skb == NULL) {
                        printk(KERN_WARNING DRV_MODULE_NAME
                               ": %s Memory squeeze, unable to allocate skb\n",
                               dev->name);
                        break;
                }
                skb_align(skb, ENET_RX_ALIGN);
                fep->rx_skbuff[i] = skb;
                CBDW_BUFADDR(bdp,
                        dma_map_single(fep->dev, skb->data,
                                L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                DMA_FROM_DEVICE));
                CBDW_DATLEN(bdp, 0);    /* zero */
                CBDW_SC(bdp, BD_ENET_RX_EMPTY |
                        ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
        }
        /*
         * If allocation failed, fill up the remainder with empty slots.
         */
        for (; i < fep->rx_ring; i++, bdp++) {
                fep->rx_skbuff[i] = NULL;
                CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
        }

        /*
         * ...and the same for transmit.
         */
        for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
                fep->tx_skbuff[i] = NULL;
                CBDW_BUFADDR(bdp, 0);
                CBDW_DATLEN(bdp, 0);
                CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
        }
}

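/*
 * Tear the rings back down: unmap and free any skbs still attached
 * to TX or RX descriptors.
 */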
void fs_cleanup_bds(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct sk_buff *skb;
        cbd_t __iomem *bdp;
        int i;

        /*
         * Reset SKB transmit buffers.
         */
        for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
                if ((skb = fep->tx_skbuff[i]) == NULL)
                        continue;

                /* unmap */
                dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                skb->len, DMA_TO_DEVICE);

                fep->tx_skbuff[i] = NULL;
                dev_kfree_skb(skb);
        }

        /*
         * Reset SKB receive buffers
         */
        for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
                if ((skb = fep->rx_skbuff[i]) == NULL)
                        continue;

                /* unmap */
                dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                        L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                        DMA_FROM_DEVICE);

                fep->rx_skbuff[i] = NULL;

                dev_kfree_skb(skb);
        }
}

/**********************************************************************************/

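/*
 * Queue one skb for transmission: claim the next free TX descriptor,
 * DMA-map the data, and hand the descriptor to the controller with
 * READY/INTR/LAST/TC set.  The queue is stopped when the ring fills
 * and woken again from fs_enet_tx().
 */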
static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        cbd_t __iomem *bdp;
        int curidx;
        u16 sc;
        unsigned long flags;

        spin_lock_irqsave(&fep->tx_lock, flags);

        /*
         * Fill in a Tx ring entry
         */
        bdp = fep->cur_tx;

        if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&fep->tx_lock, flags);

                /*
                 * Oops.  All transmit buffers are full.  Bail out.
                 * This should not happen, since the tx queue should be stopped.
                 */
                printk(KERN_WARNING DRV_MODULE_NAME
                       ": %s tx queue full!\n", dev->name);
                return NETDEV_TX_BUSY;
        }

        curidx = bdp - fep->tx_bd_base;
        /*
         * Clear all of the status flags.
         */
        CBDC_SC(bdp, BD_ENET_TX_STATS);

        /*
         * Save skb pointer.
         */
        fep->tx_skbuff[curidx] = skb;

        fep->stats.tx_bytes += skb->len;

        /*
         * Push the data cache so the CPM does not get stale memory data.
         */
        CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
                                skb->data, skb->len, DMA_TO_DEVICE));
        CBDW_DATLEN(bdp, skb->len);

        dev->trans_start = jiffies;

        /*
         * If this was the last BD in the ring, start at the beginning again.
         */
        if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
                fep->cur_tx++;
        else
                fep->cur_tx = fep->tx_bd_base;

        if (!--fep->tx_free)
                netif_stop_queue(dev);

        /* Trigger transmission start */
        sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
             BD_ENET_TX_LAST | BD_ENET_TX_TC;

        /* note that while FEC does not have this bit
         * it marks it as available for software use
         * yay for hw reuse :) */
        if (skb->len <= 60)
                sc |= BD_ENET_TX_PAD;
        CBDS_SC(bdp, sc);

        (*fep->ops->tx_kickstart)(dev);

        spin_unlock_irqrestore(&fep->tx_lock, flags);

        return NETDEV_TX_OK;
}

static int fs_request_irq(struct net_device *dev, int irq, const char *name,
                irq_handler_t irqf)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        (*fep->ops->pre_request_irq)(dev, irq);
        return request_irq(irq, irqf, IRQF_SHARED, name, dev);
}

static void fs_free_irq(struct net_device *dev, int irq)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        free_irq(irq, dev);
        (*fep->ops->post_free_irq)(dev, irq);
}

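/*
 * netdev watchdog hook: the transmitter is assumed wedged, so restart
 * the MAC (and the PHY state machine) and wake the queue if there is
 * room in the TX ring again.
 */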
static void fs_timeout(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        unsigned long flags;
        int wake = 0;

        fep->stats.tx_errors++;

        spin_lock_irqsave(&fep->lock, flags);

        if (dev->flags & IFF_UP) {
                phy_stop(fep->phydev);
                (*fep->ops->stop)(dev);
                (*fep->ops->restart)(dev);
                phy_start(fep->phydev);
        }

        wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
        spin_unlock_irqrestore(&fep->lock, flags);

        if (wake)
                netif_wake_queue(dev);
}

/*-----------------------------------------------------------------------------
 *  generic link-change handler - should be sufficient for most cases
 *-----------------------------------------------------------------------------*/
static void generic_adjust_link(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct phy_device *phydev = fep->phydev;
        int new_state = 0;

        if (phydev->link) {
                /* adjust to duplex mode */
                if (phydev->duplex != fep->oldduplex) {
                        new_state = 1;
                        fep->oldduplex = phydev->duplex;
                }

                if (phydev->speed != fep->oldspeed) {
                        new_state = 1;
                        fep->oldspeed = phydev->speed;
                }

                if (!fep->oldlink) {
                        new_state = 1;
                        fep->oldlink = 1;
                        netif_schedule(dev);
                        netif_carrier_on(dev);
                        netif_start_queue(dev);
                }

                if (new_state)
                        fep->ops->restart(dev);
        } else if (fep->oldlink) {
                new_state = 1;
                fep->oldlink = 0;
                fep->oldspeed = 0;
                fep->oldduplex = -1;
                netif_carrier_off(dev);
                netif_stop_queue(dev);
        }

        if (new_state && netif_msg_link(fep))
                phy_print_status(phydev);
}

static void fs_adjust_link(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave(&fep->lock, flags);

        if (fep->ops->adjust_link)
                fep->ops->adjust_link(dev);
        else
                generic_adjust_link(dev);

        spin_unlock_irqrestore(&fep->lock, flags);
}

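/*
 * Attach to the PHY described by fpi->bus_id via phylib;
 * fs_adjust_link() is registered as the link-change callback.
 */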
static int fs_init_phy(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct phy_device *phydev;

        fep->oldlink = 0;
        fep->oldspeed = 0;
        fep->oldduplex = -1;
        if (fep->fpi->bus_id)
                phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0,
                                PHY_INTERFACE_MODE_MII);
        else {
                printk(KERN_ERR "No phy bus ID specified in BSP code\n");
                return -EINVAL;
        }
        if (IS_ERR(phydev)) {
                printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
                return PTR_ERR(phydev);
        }

        fep->phydev = phydev;

        return 0;
}

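/*
 * Open entry point (dev->open): enable NAPI if configured, install
 * the interrupt handler, then attach and start the PHY; the queue is
 * started from the link-change callback once carrier comes up.
 */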
static int fs_enet_open(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        int r;
        int err;

        if (fep->fpi->use_napi)
                napi_enable(&fep->napi);

        /* Install our interrupt handler. */
        r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt);
        if (r != 0) {
                printk(KERN_ERR DRV_MODULE_NAME
                       ": %s Could not allocate FS_ENET IRQ!\n", dev->name);
                if (fep->fpi->use_napi)
                        napi_disable(&fep->napi);
                return -EINVAL;
        }

        err = fs_init_phy(dev);
        if (err) {
                if (fep->fpi->use_napi)
                        napi_disable(&fep->napi);
                return err;
        }
        phy_start(fep->phydev);

        return 0;
}

static int fs_enet_close(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        unsigned long flags;

        netif_stop_queue(dev);
        netif_carrier_off(dev);
        if (fep->fpi->use_napi)
                napi_disable(&fep->napi);
        phy_stop(fep->phydev);

        spin_lock_irqsave(&fep->lock, flags);
        spin_lock(&fep->tx_lock);
        (*fep->ops->stop)(dev);
        spin_unlock(&fep->tx_lock);
        spin_unlock_irqrestore(&fep->lock, flags);

        /* release any irqs */
        phy_disconnect(fep->phydev);
        fep->phydev = NULL;
        fs_free_irq(dev, fep->interrupt);

        return 0;
}

static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        return &fep->stats;
}

/*************************************************************************/

static void fs_get_drvinfo(struct net_device *dev,
                            struct ethtool_drvinfo *info)
{
        strcpy(info->driver, DRV_MODULE_NAME);
        strcpy(info->version, DRV_MODULE_VERSION);
}

static int fs_get_regs_len(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        return (*fep->ops->get_regs_len)(dev);
}

static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                         void *p)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        unsigned long flags;
        int r, len;

        len = regs->len;

        spin_lock_irqsave(&fep->lock, flags);
        r = (*fep->ops->get_regs)(dev, p, &len);
        spin_unlock_irqrestore(&fep->lock, flags);

        if (r == 0)
                regs->version = 0;
}

static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        if (!fep->phydev)
                return -ENODEV;

        return phy_ethtool_gset(fep->phydev, cmd);
}

static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        if (!fep->phydev)
                return -ENODEV;

        return phy_ethtool_sset(fep->phydev, cmd);
}

static int fs_nway_reset(struct net_device *dev)
{
        return 0;
}

static u32 fs_get_msglevel(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        return fep->msg_enable;
}

static void fs_set_msglevel(struct net_device *dev, u32 value)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        fep->msg_enable = value;
}

static const struct ethtool_ops fs_ethtool_ops = {
        .get_drvinfo = fs_get_drvinfo,
        .get_regs_len = fs_get_regs_len,
        .get_settings = fs_get_settings,
        .set_settings = fs_set_settings,
        .nway_reset = fs_nway_reset,
        .get_link = ethtool_op_get_link,
        .get_msglevel = fs_get_msglevel,
        .set_msglevel = fs_set_msglevel,
        .set_tx_csum = ethtool_op_set_tx_csum,  /* local! */
        .set_sg = ethtool_op_set_sg,
        .get_regs = fs_get_regs,
};

static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data;

        if (!netif_running(dev))
                return -EINVAL;

        return phy_mii_ioctl(fep->phydev, mii, cmd);
}

extern int fs_mii_connect(struct net_device *dev);
extern void fs_mii_disconnect(struct net_device *dev);

/**************************************************************************************/

/* handy pointer to the immap */
void __iomem *fs_enet_immap = NULL;

static int setup_immap(void)
{
#ifdef CONFIG_CPM1
        fs_enet_immap = ioremap(IMAP_ADDR, 0x4000);
        WARN_ON(!fs_enet_immap);
#elif defined(CONFIG_CPM2)
        fs_enet_immap = cpm2_immr;
#endif

        return 0;
}

static void cleanup_immap(void)
{
#if defined(CONFIG_CPM1)
        iounmap(fs_enet_immap);
#endif
}

/**************************************************************************************/

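/*
 * Derive the phylib bus_id ("<mdio-addr>:<phy-addr>") for this MAC
 * from the device tree: either a "fixed-link" property, or a
 * "phy-handle" phandle pointing at a PHY node under an MDIO bus.
 */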
static int __devinit find_phy(struct device_node *np,
                              struct fs_platform_info *fpi)
{
        struct device_node *phynode, *mdionode;
        struct resource res;
        int ret = 0, len;
        const u32 *data;

        data = of_get_property(np, "fixed-link", NULL);
        if (data) {
                snprintf(fpi->bus_id, 16, "%x:%02x", 0, *data);
                return 0;
        }

        data = of_get_property(np, "phy-handle", &len);
        if (!data || len != 4)
                return -EINVAL;

        phynode = of_find_node_by_phandle(*data);
        if (!phynode)
                return -EINVAL;

        mdionode = of_get_parent(phynode);
        if (!mdionode) {
                ret = -EINVAL;
                goto out_put_phy;
        }

        ret = of_address_to_resource(mdionode, 0, &res);
        if (ret)
                goto out_put_mdio;

        data = of_get_property(phynode, "reg", &len);
        if (!data || len != 4) {
                ret = -EINVAL;
                goto out_put_mdio;
        }

        snprintf(fpi->bus_id, 16, "%x:%02x", res.start, *data);

out_put_mdio:
        of_node_put(mdionode);
out_put_phy:
        of_node_put(phynode);
        return ret;
}

#ifdef CONFIG_FS_ENET_HAS_FEC
#define IS_FEC(match) ((match)->data == &fs_fec_ops)
#else
#define IS_FEC(match) 0
#endif

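/*
 * Probe: allocate the net_device (with the skb ring bookkeeping
 * appended to the private area), apply default ring/copybreak
 * parameters, resolve the PHY from the device tree, let the
 * MAC-specific ops allocate the BD rings, and register the netdev.
 */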
static int __devinit fs_enet_probe(struct of_device *ofdev,
                                   const struct of_device_id *match)
{
        struct net_device *ndev;
        struct fs_enet_private *fep;
        struct fs_platform_info *fpi;
        const u32 *data;
        const u8 *mac_addr;
        int privsize, len, ret = -ENODEV;

        fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
        if (!fpi)
                return -ENOMEM;

        if (!IS_FEC(match)) {
                data = of_get_property(ofdev->node, "fsl,cpm-command", &len);
                if (!data || len != 4)
                        goto out_free_fpi;

                fpi->cp_command = *data;
        }

        fpi->rx_ring = 32;
        fpi->tx_ring = 32;
        fpi->rx_copybreak = 240;
        fpi->use_napi = 1;
        fpi->napi_weight = 17;

        ret = find_phy(ofdev->node, fpi);
        if (ret)
                goto out_free_fpi;

        privsize = sizeof(*fep) +
                   sizeof(struct sk_buff **) *
                   (fpi->rx_ring + fpi->tx_ring);

        ndev = alloc_etherdev(privsize);
        if (!ndev) {
                ret = -ENOMEM;
                goto out_free_fpi;
        }

        dev_set_drvdata(&ofdev->dev, ndev);

        fep = netdev_priv(ndev);
        fep->dev = &ofdev->dev;
        fep->ndev = ndev;
        fep->fpi = fpi;
        fep->ops = match->data;

        ret = fep->ops->setup_data(ndev);
        if (ret)
                goto out_free_dev;

        fep->rx_skbuff = (struct sk_buff **)&fep[1];
        fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;

        spin_lock_init(&fep->lock);
        spin_lock_init(&fep->tx_lock);

        mac_addr = of_get_mac_address(ofdev->node);
        if (mac_addr)
                memcpy(ndev->dev_addr, mac_addr, 6);

        ret = fep->ops->allocate_bd(ndev);
        if (ret)
                goto out_cleanup_data;

        fep->rx_bd_base = fep->ring_base;
        fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;

        fep->tx_ring = fpi->tx_ring;
        fep->rx_ring = fpi->rx_ring;

        ndev->open = fs_enet_open;
        ndev->hard_start_xmit = fs_enet_start_xmit;
        ndev->tx_timeout = fs_timeout;
        ndev->watchdog_timeo = 2 * HZ;
        ndev->stop = fs_enet_close;
        ndev->get_stats = fs_enet_get_stats;
        ndev->set_multicast_list = fs_set_multicast_list;

        if (fpi->use_napi)
                netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi,
                               fpi->napi_weight);

        ndev->ethtool_ops = &fs_ethtool_ops;
        ndev->do_ioctl = fs_ioctl;

        init_timer(&fep->phy_timer_list);

        netif_carrier_off(ndev);

        ret = register_netdev(ndev);
        if (ret)
                goto out_free_bd;

        printk(KERN_INFO "%s: fs_enet: %02x:%02x:%02x:%02x:%02x:%02x\n",
               ndev->name,
               ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
               ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);

        return 0;

out_free_bd:
        fep->ops->free_bd(ndev);
out_cleanup_data:
        fep->ops->cleanup_data(ndev);
out_free_dev:
        free_netdev(ndev);
        dev_set_drvdata(&ofdev->dev, NULL);
out_free_fpi:
        kfree(fpi);
        return ret;
}

static int fs_enet_remove(struct of_device *ofdev)
{
        struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
        struct fs_enet_private *fep = netdev_priv(ndev);

        unregister_netdev(ndev);

        fep->ops->free_bd(ndev);
        fep->ops->cleanup_data(ndev);
        dev_set_drvdata(fep->dev, NULL);

        free_netdev(ndev);
        return 0;
}

static struct of_device_id fs_enet_match[] = {
#ifdef CONFIG_FS_ENET_HAS_SCC
        {
                .compatible = "fsl,cpm1-scc-enet",
                .data = (void *)&fs_scc_ops,
        },
#endif
#ifdef CONFIG_FS_ENET_HAS_FCC
        {
                .compatible = "fsl,cpm2-fcc-enet",
                .data = (void *)&fs_fcc_ops,
        },
#endif
#ifdef CONFIG_FS_ENET_HAS_FEC
        {
                .compatible = "fsl,pq1-fec-enet",
                .data = (void *)&fs_fec_ops,
        },
#endif
        {}
};

static struct of_platform_driver fs_enet_driver = {
        .name   = "fs_enet",
        .match_table = fs_enet_match,
        .probe = fs_enet_probe,
        .remove = fs_enet_remove,
};

static int __init fs_init(void)
{
        int r = setup_immap();
        if (r != 0)
                return r;

        r = of_register_platform_driver(&fs_enet_driver);
        if (r != 0)
                goto out;

        return 0;

out:
        cleanup_immap();
        return r;
}

static void __exit fs_cleanup(void)
{
        of_unregister_platform_driver(&fs_enet_driver);
        cleanup_immap();
}

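/*
 * Poll-mode "interrupt" for netconsole and friends: run the normal
 * interrupt handler with the device IRQ disabled.
 */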
#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev)
{
        disable_irq(dev->irq);
        fs_enet_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

/**************************************************************************************/

module_init(fs_init);
module_exit(fs_cleanup);