/*
        drivers/net/tulip/interrupt.c

        Copyright 2000,2001  The Linux Kernel Team
        Written/copyright 1994-2001 by Donald Becker.

        This software may be used and distributed according to the terms
        of the GNU General Public License, incorporated herein by reference.

        Please submit bugs to http://bugzilla.kernel.org/ .
*/

#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>

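/*
 * Tunables, set from module parameters elsewhere in the driver:
 * tulip_rx_copybreak is the size threshold below which a received packet
 * is copied into a freshly allocated skb (so the full-size ring buffer
 * can be reused without remapping); tulip_max_interrupt_work bounds the
 * per-invocation work loop in tulip_interrupt().
 */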
int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
        /*  CSR11: 21143 hardware Interrupt Mitigation Control.
            We use only RX mitigation; other techniques are used
            for TX interrupt mitigation.

           31    Cycle Size (timer control)
           30:27 TX timer in 16 * Cycle size
           26:24 TX No pkts before Int.
           23:20 RX timer in Cycle size
           19:17 RX No pkts before Int.
           16    Continuous Mode (CM)
        */

        0x0,             /* IM disabled */
        0x80150000,      /* RX time = 1, RX pkts = 2, CM = 1 */
        0x80150000,
        0x80270000,
        0x80370000,
        0x80490000,
        0x80590000,
        0x80690000,
        0x807B0000,
        0x808B0000,
        0x809D0000,
        0x80AD0000,
        0x80BD0000,
        0x80CF0000,
        0x80DF0000,
//      0x80FF0000      /* RX time = 16, RX pkts = 7, CM = 1 */
        0x80F10000      /* RX time = 16, RX pkts = 0, CM = 1 */
};
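
/*
 * Worked decode of one entry against the field map above (a sketch, not
 * taken from the datasheet): 0x80150000 sets bit 31 (cycle size),
 * bits 23:20 = 1 (RX timer), bits 19:17 = 2 (RX packet count) and
 * bit 16 = 1 (continuous mode), matching the "RX time = 1, RX pkts = 2,
 * CM = 1" annotation.
 */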
#endif

int tulip_refill_rx(struct net_device *dev)
{
        struct tulip_private *tp = netdev_priv(dev);
        int entry;
        int refilled = 0;

        /* Refill the Rx ring buffers. */
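        /* cur_rx is the next descriptor the chip may fill; dirty_rx trails
         * it, marking the oldest slot still to be re-armed. Every slot in
         * between gets a fresh skb (if missing) and DescOwned set. */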
        for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
                entry = tp->dirty_rx % RX_RING_SIZE;
                if (tp->rx_buffers[entry].skb == NULL) {
                        struct sk_buff *skb;
                        dma_addr_t mapping;

                        skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
                        if (skb == NULL)
                                break;

                        mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
                                                 PCI_DMA_FROMDEVICE);
                        tp->rx_buffers[entry].mapping = mapping;

                        skb->dev = dev;                 /* Mark as being used by this device. */
                        tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
                        refilled++;
                }
                tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
        }
        if (tp->chip_id == LC82C168) {
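                /* CSR5 bits 19:17 report the receive process state; per the
                 * 21143-style register layout this driver assumes, state 4 is
                 * "suspended, no receive buffer". Writing CSR2 (the Rx poll
                 * demand register) kicks the receiver back into motion. */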
                if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
                        /* Rx stopped due to out of buffers,
                         * restart it
                         */
                        iowrite32(0x01, tp->base_addr + CSR2);
                }
        }
        return refilled;
}

#ifdef CONFIG_TULIP_NAPI

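/* Scheduled from the oom path below: retry NAPI polling (and hence
 * tulip_refill_rx()) after skb allocation failed, with Rx interrupts
 * still disabled. */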
void oom_timer(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct tulip_private *tp = netdev_priv(dev);
        napi_schedule(&tp->napi);
}

int tulip_poll(struct napi_struct *napi, int budget)
{
        struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
        struct net_device *dev = tp->dev;
        int entry = tp->cur_rx % RX_RING_SIZE;
        int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
        int received = 0;
#endif

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

        /* One buffer is needed for mitigation activation; or this might be
           a bug in the ring buffer code. Check later. -- JHS */

        if (budget >= RX_RING_SIZE)
                budget--;
#endif

        if (tulip_debug > 4)
                netdev_dbg(dev, "In tulip_poll(), entry %d %08x\n",
                           entry, tp->rx_ring[entry].status);

        do {
                if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
                        netdev_dbg(dev, "In tulip_poll(), hardware disappeared\n");
                        break;
                }
                /* Acknowledge current RX interrupt sources. */
                iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);


                /* If we own the next entry, it is a new packet. Send it up. */
                while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                        s32 status = le32_to_cpu(tp->rx_ring[entry].status);
                        short pkt_len;

                        if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
                                break;

                        if (tulip_debug > 5)
                                netdev_dbg(dev, "In tulip_poll(), entry %d %08x\n",
                                           entry, status);

                        if (++work_done >= budget)
                                goto not_done;

                        /*
                         * Omit the four octet CRC from the length.
                         * (May not be considered valid until we have
                         * checked status for RxLengthOver2047 bits)
                         */
                        pkt_len = ((status >> 16) & 0x7ff) - 4;

                        /*
                         * Maximum pkt_len is 1518 (1514 + vlan header)
                         * Anything higher than this is always invalid
                         * regardless of RxLengthOver2047 bits
                         */

                        if ((status & (RxLengthOver2047 |
                                       RxDescCRCError |
                                       RxDescCollisionSeen |
                                       RxDescRunt |
                                       RxDescDescErr |
                                       RxWholePkt)) != RxWholePkt ||
                            pkt_len > 1518) {
                                if ((status & (RxLengthOver2047 |
                                               RxWholePkt)) != RxWholePkt) {
                                        /* Ignore earlier buffers. */
                                        if ((status & 0xffff) != 0x7fff) {
                                                if (tulip_debug > 1)
                                                        dev_warn(&dev->dev,
                                                                 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
                                                                 status);
                                                dev->stats.rx_length_errors++;
                                        }
                                } else {
                                        /* There was a fatal error. */
                                        if (tulip_debug > 2)
                                                netdev_dbg(dev, "Receive error, Rx status %08x\n",
                                                           status);
                                        dev->stats.rx_errors++; /* end of a packet.*/
                                        if (pkt_len > 1518 ||
                                            (status & RxDescRunt))
                                                dev->stats.rx_length_errors++;

                                        if (status & 0x0004)
                                                dev->stats.rx_frame_errors++;
                                        if (status & 0x0002)
                                                dev->stats.rx_crc_errors++;
                                        if (status & 0x0001)
                                                dev->stats.rx_fifo_errors++;
                                }
                        } else {
                                struct sk_buff *skb;

                                /* Check if the packet is long enough to accept without copying
                                   to a minimally-sized skbuff. */
                                if (pkt_len < tulip_rx_copybreak &&
                                    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                        skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                        pci_dma_sync_single_for_cpu(tp->pdev,
                                                                    tp->rx_buffers[entry].mapping,
                                                                    pkt_len, PCI_DMA_FROMDEVICE);
#if !defined(__alpha__)
                                        skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
                                                                pkt_len);
                                        skb_put(skb, pkt_len);
#else
                                        memcpy(skb_put(skb, pkt_len),
                                               tp->rx_buffers[entry].skb->data,
                                               pkt_len);
#endif
                                        pci_dma_sync_single_for_device(tp->pdev,
                                                                       tp->rx_buffers[entry].mapping,
                                                                       pkt_len, PCI_DMA_FROMDEVICE);
                                } else {        /* Pass up the skb already on the Rx ring. */
                                        char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                             pkt_len);

#ifndef final_version
                                        if (tp->rx_buffers[entry].mapping !=
                                            le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                                                dev_err(&dev->dev,
                                                        "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
                                                        le32_to_cpu(tp->rx_ring[entry].buffer1),
                                                        (unsigned long long)tp->rx_buffers[entry].mapping,
                                                        skb->head, temp);
                                        }
#endif

                                        pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                         PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                                        tp->rx_buffers[entry].skb = NULL;
                                        tp->rx_buffers[entry].mapping = 0;
                                }
                                skb->protocol = eth_type_trans(skb, dev);

                                netif_receive_skb(skb);

                                dev->stats.rx_packets++;
                                dev->stats.rx_bytes += pkt_len;
                        }
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
                        received++;
#endif

                        entry = (++tp->cur_rx) % RX_RING_SIZE;
                        if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
                                tulip_refill_rx(dev);

                }

                /* New ack strategy: the irq handler no longer acks Rx;
                   hopefully this helps. */

                /* Really bad things can happen here... If a new packet
                 * arrives and an irq arrives (tx, or just due to an
                 * occasionally unset mask), it will be acked by the irq
                 * handler, but a new poll thread is not scheduled. It is a
                 * major hole in the design.
                 * No idea how to fix this if "playing with fire" fails
                 * tomorrow (night 011029). If it does not fail, we won
                 * finally: the amount of IO did not increase at all. */
        } while ((ioread32(tp->base_addr + CSR5) & RxIntr));

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

        /* We use this simplistic scheme for IM. It's proven by
           real life installations. We could have IM enabled
           continuously, but this would cause unnecessary latency.
           Unfortunately we can't use all the NET_RX_* feedback here;
           that would turn on IM for devices that are not contributing
           to backlog congestion, with unnecessary latency.

           We monitor the device RX ring and have:

           HW Interrupt Mitigation either ON or OFF.

           ON:  More than 1 pkt received (per intr.) OR we are dropping
           OFF: Only 1 pkt received

           Note: we only use the min and max (0, 15) settings from mit_table. */

        if (tp->flags & HAS_INTR_MITIGATION) {
                if (received > 1) {
                        if (!tp->mit_on) {
                                tp->mit_on = 1;
                                iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
                        }
                } else {
                        if (tp->mit_on) {
                                tp->mit_on = 0;
                                iowrite32(0, tp->base_addr + CSR11);
                        }
                }
        }

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

        tulip_refill_rx(dev);

        /* If the RX ring is not full, we are out of memory. */
        if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                goto oom;

        /* Remove us from the polling list and enable RX intr. */

        napi_complete(napi);
        iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr + CSR7);

        /* The last op happens after poll completion. Which means the following:
         * 1. it can race with disabling irqs in the irq handler
         * 2. it can race with disabling/enabling irqs in other poll threads
         * 3. if an irq is raised after we begin the loop, it will be
         *    immediately triggered here.
         *
         * Summarizing: the logic results in some redundant irqs both
         * due to races in masking and due to too-late acking of already
         * processed irqs. But it must not result in losing events.
         */

        return work_done;

not_done:
        if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
            tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                tulip_refill_rx(dev);

        if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                goto oom;

        return work_done;

oom:    /* Executed with RX ints disabled */

        /* Start timer, stop polling, but do not enable rx interrupts. */
        mod_timer(&tp->oom_timer, jiffies + 1);

        /* Think: timer_pending() would be an explicit signature of a bug.
         * The timer can be pending now, but it may have fired and completed
         * before we did napi_complete(). See? We would lose it. */

        /* Remove ourselves from the polling list. */
        napi_complete(napi);

        return work_done;
}

#else /* CONFIG_TULIP_NAPI */

static int tulip_rx(struct net_device *dev)
{
        struct tulip_private *tp = netdev_priv(dev);
        int entry = tp->cur_rx % RX_RING_SIZE;
        int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
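        /* At most one full ring's worth of packets is processed per call:
         * the limit is the number of descriptors currently inside the
         * dirty_rx..cur_rx window. */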
        int received = 0;

        if (tulip_debug > 4)
                netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
                           entry, tp->rx_ring[entry].status);
        /* If we own the next entry, it is a new packet. Send it up. */
        while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                s32 status = le32_to_cpu(tp->rx_ring[entry].status);
                short pkt_len;

                if (tulip_debug > 5)
                        netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
                                   entry, status);
                if (--rx_work_limit < 0)
                        break;

                /*
                  Omit the four octet CRC from the length.
                  (May not be considered valid until we have
                  checked status for RxLengthOver2047 bits)
                */
                pkt_len = ((status >> 16) & 0x7ff) - 4;
                /*
                  Maximum pkt_len is 1518 (1514 + vlan header)
                  Anything higher than this is always invalid
                  regardless of RxLengthOver2047 bits
                */

                if ((status & (RxLengthOver2047 |
                               RxDescCRCError |
                               RxDescCollisionSeen |
                               RxDescRunt |
                               RxDescDescErr |
                               RxWholePkt)) != RxWholePkt ||
                    pkt_len > 1518) {
                        if ((status & (RxLengthOver2047 |
                                       RxWholePkt)) != RxWholePkt) {
                                /* Ignore earlier buffers. */
                                if ((status & 0xffff) != 0x7fff) {
                                        if (tulip_debug > 1)
                                                netdev_warn(dev,
                                                            "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
                                                            status);
                                        dev->stats.rx_length_errors++;
                                }
                        } else {
                                /* There was a fatal error. */
                                if (tulip_debug > 2)
                                        netdev_dbg(dev, "Receive error, Rx status %08x\n",
                                                   status);
                                dev->stats.rx_errors++; /* end of a packet.*/
                                if (pkt_len > 1518 ||
                                    (status & RxDescRunt))
                                        dev->stats.rx_length_errors++;
                                if (status & 0x0004)
                                        dev->stats.rx_frame_errors++;
                                if (status & 0x0002)
                                        dev->stats.rx_crc_errors++;
                                if (status & 0x0001)
                                        dev->stats.rx_fifo_errors++;
                        }
                } else {
                        struct sk_buff *skb;

                        /* Check if the packet is long enough to accept without copying
                           to a minimally-sized skbuff. */
                        if (pkt_len < tulip_rx_copybreak &&
                            (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                pci_dma_sync_single_for_cpu(tp->pdev,
                                                            tp->rx_buffers[entry].mapping,
                                                            pkt_len, PCI_DMA_FROMDEVICE);
#if !defined(__alpha__)
                                skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
                                                        pkt_len);
                                skb_put(skb, pkt_len);
#else
                                memcpy(skb_put(skb, pkt_len),
                                       tp->rx_buffers[entry].skb->data,
                                       pkt_len);
#endif
                                pci_dma_sync_single_for_device(tp->pdev,
                                                               tp->rx_buffers[entry].mapping,
                                                               pkt_len, PCI_DMA_FROMDEVICE);
                        } else {        /* Pass up the skb already on the Rx ring. */
                                char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                     pkt_len);

#ifndef final_version
                                if (tp->rx_buffers[entry].mapping !=
                                    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                                        dev_err(&dev->dev,
                                                "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
                                                le32_to_cpu(tp->rx_ring[entry].buffer1),
                                                (unsigned long long)tp->rx_buffers[entry].mapping,
                                                skb->head, temp);
                                }
#endif

                                pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                                tp->rx_buffers[entry].skb = NULL;
                                tp->rx_buffers[entry].mapping = 0;
                        }
                        skb->protocol = eth_type_trans(skb, dev);

                        netif_rx(skb);

                        dev->stats.rx_packets++;
                        dev->stats.rx_bytes += pkt_len;
                }
                received++;
                entry = (++tp->cur_rx) % RX_RING_SIZE;
        }
        return received;
}
#endif  /* CONFIG_TULIP_NAPI */

static inline unsigned int phy_interrupt(struct net_device *dev)
{
#ifdef __hppa__
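        /* A sketch of the logic, grounded only in the code below: on these
         * HPPA boards the PHY interrupt is visible through CSR12, so the
         * low byte is compared against a software shadow; a change means a
         * link event, which is acked via CSR12 and handled through
         * tulip_check_duplex(). */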
        struct tulip_private *tp = netdev_priv(dev);
        int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

        if (csr12 != tp->csr12_shadow) {
                /* ack interrupt */
                iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
                tp->csr12_shadow = csr12;
                /* do link change stuff */
                spin_lock(&tp->lock);
                tulip_check_duplex(dev);
                spin_unlock(&tp->lock);
                /* clear irq ack bit */
                iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

                return 1;
        }
#endif

        return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = (struct net_device *)dev_instance;
        struct tulip_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->base_addr;
        int csr5;
        int missed;
        int rx = 0;
        int tx = 0;
        int oi = 0;
        int maxrx = RX_RING_SIZE;
        int maxtx = TX_RING_SIZE;
        int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
        int rxd = 0;
#else
        int entry;
#endif
        unsigned int work_count = tulip_max_interrupt_work;
        unsigned int handled = 0;

        /* Let's see whether the interrupt really is for us */
        csr5 = ioread32(ioaddr + CSR5);

        if (tp->flags & HAS_PHY_IRQ)
                handled = phy_interrupt(dev);

        if ((csr5 & (NormalIntr | AbnormalIntr)) == 0)
                return IRQ_RETVAL(handled);

        tp->nir++;

        do {

#ifdef CONFIG_TULIP_NAPI

                if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
                        rxd++;
                        /* Mask RX intrs and add the device to poll list. */
                        iowrite32(tulip_tbl[tp->chip_id].valid_intrs & ~RxPollInt,
                                  ioaddr + CSR7);
                        napi_schedule(&tp->napi);

                        if (!(csr5 & ~(AbnormalIntr | NormalIntr | RxPollInt | TPLnkPass)))
                                break;
                }

                /* Acknowledge the interrupt sources we handle here ASAP;
                   the poll function does the Rx and RxNoBuf acking. */

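                /* 0x0001ff3f is the all-sources ack mask 0x0001ffff with the
                 * RxIntr and RxNoBuf bits cleared (an assumption based on the
                 * comment above), so those two stay pending for tulip_poll()
                 * to acknowledge. */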
                iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
                /* Acknowledge all of the current interrupt sources ASAP. */
                iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);

                if (csr5 & (RxIntr | RxNoBuf)) {
                        rx += tulip_rx(dev);
                        tulip_refill_rx(dev);
                }

#endif /* CONFIG_TULIP_NAPI */

                if (tulip_debug > 4)
                        netdev_dbg(dev, "interrupt  csr5=%#8.8x new csr5=%#8.8x\n",
                                   csr5, ioread32(ioaddr + CSR5));

                if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
                        unsigned int dirty_tx;

                        spin_lock(&tp->lock);

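                        /* Reap completed Tx descriptors: dirty_tx trails
                         * cur_tx, and a negative status word means the chip
                         * still owns (has not yet transmitted) that
                         * descriptor. */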
                        for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
                             dirty_tx++) {
                                int entry = dirty_tx % TX_RING_SIZE;
                                int status = le32_to_cpu(tp->tx_ring[entry].status);

                                if (status < 0)
                                        break;                  /* It still has not been Txed */

                                /* Check for Rx filter setup frames. */
                                if (tp->tx_buffers[entry].skb == NULL) {
                                        /* test because dummy frames are not mapped */
                                        if (tp->tx_buffers[entry].mapping)
                                                pci_unmap_single(tp->pdev,
                                                                 tp->tx_buffers[entry].mapping,
                                                                 sizeof(tp->setup_frame),
                                                                 PCI_DMA_TODEVICE);
                                        continue;
                                }

                                if (status & 0x8000) {
                                        /* There was a major error; log it. */
#ifndef final_version
                                        if (tulip_debug > 1)
                                                netdev_dbg(dev, "Transmit error, Tx status %08x\n",
                                                           status);
#endif
                                        dev->stats.tx_errors++;
                                        if (status & 0x4104)
                                                dev->stats.tx_aborted_errors++;
                                        if (status & 0x0C00)
                                                dev->stats.tx_carrier_errors++;
                                        if (status & 0x0200)
                                                dev->stats.tx_window_errors++;
                                        if (status & 0x0002)
                                                dev->stats.tx_fifo_errors++;
                                        if ((status & 0x0080) && tp->full_duplex == 0)
                                                dev->stats.tx_heartbeat_errors++;
                                } else {
                                        dev->stats.tx_bytes +=
                                                tp->tx_buffers[entry].skb->len;
                                        dev->stats.collisions += (status >> 3) & 15;
                                        dev->stats.tx_packets++;
                                }

                                pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
                                                 tp->tx_buffers[entry].skb->len,
                                                 PCI_DMA_TODEVICE);

                                /* Free the original skb. */
                                dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
                                tp->tx_buffers[entry].skb = NULL;
                                tp->tx_buffers[entry].mapping = 0;
                                tx++;
                        }

#ifndef final_version
                        if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
                                dev_err(&dev->dev,
                                        "Out-of-sync dirty pointer, %d vs. %d\n",
                                        dirty_tx, tp->cur_tx);
                                dirty_tx += TX_RING_SIZE;
                        }
#endif

                        if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
                                netif_wake_queue(dev);

                        tp->dirty_tx = dirty_tx;
                        if (csr5 & TxDied) {
                                if (tulip_debug > 2)
                                        dev_warn(&dev->dev,
                                                 "The transmitter stopped.  CSR5 is %x, CSR6 %x, new CSR6 %x\n",
                                                 csr5, ioread32(ioaddr + CSR6),
                                                 tp->csr6);
                                tulip_restart_rxtx(tp);
                        }
                        spin_unlock(&tp->lock);
                }

                /* Log errors. */
                if (csr5 & AbnormalIntr) {      /* Abnormal error summary bit. */
                        if (csr5 == 0xffffffff)
                                break;
                        if (csr5 & TxJabber)
                                dev->stats.tx_errors++;
                        if (csr5 & TxFIFOUnderflow) {
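                                /* Per the 21143-style CSR6 layout this driver
                                 * assumes: bits 15:14 are the Tx threshold
                                 * field (so += 0x4000 bumps it one step) and
                                 * 0x00200000 is the store-and-forward bit,
                                 * used once the threshold is maxed out. */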
                                if ((tp->csr6 & 0xC000) != 0xC000)
                                        tp->csr6 += 0x4000;     /* Bump up the Tx threshold */
                                else
                                        tp->csr6 |= 0x00200000;  /* Store-n-forward. */
                                /* Restart the transmit process. */
                                tulip_restart_rxtx(tp);
                                iowrite32(0, ioaddr + CSR1);
                        }
                        if (csr5 & (RxDied | RxNoBuf)) {
                                if (tp->flags & COMET_MAC_ADDR) {
                                        iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
                                        iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
                                }
                        }
                        if (csr5 & RxDied) {            /* Missed a Rx frame. */
                                dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
                                dev->stats.rx_errors++;
                                tulip_start_rxtx(tp);
                        }
                        /*
                         * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
                         * call is ever done under the spinlock
                         */
                        if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
                                if (tp->link_change)
                                        (tp->link_change)(dev, csr5);
                        }
                        if (csr5 & SystemError) {
                                int error = (csr5 >> 23) & 7;
                                /* oops, we hit a PCI error.  The code produced corresponds
                                 * to the reason:
                                 *  0 - parity error
                                 *  1 - master abort
                                 *  2 - target abort
                                 * Note that on parity error, we should do a software reset
                                 * of the chip to get it back into a sane state (according
                                 * to the 21142/3 docs that is).
                                 *   -- rmk
                                 */
                                dev_err(&dev->dev,
                                        "(%lu) System Error occurred (%d)\n",
                                        tp->nir, error);
                        }
                        /* Clear all error sources, including undocumented ones! */
                        iowrite32(0x0800f7ba, ioaddr + CSR5);
                        oi++;
                }
                if (csr5 & TimerInt) {

                        if (tulip_debug > 2)
                                dev_err(&dev->dev,
                                        "Re-enabling interrupts, %08x\n",
                                        csr5);
                        iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
                        tp->ttimer = 0;
                        oi++;
                }
                if (tx > maxtx || rx > maxrx || oi > maxoi) {
                        if (tulip_debug > 1)
                                dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
                                         csr5, tp->nir, tx, rx, oi);

                        /* Acknowledge all interrupt sources. */
                        iowrite32(0x8001ffff, ioaddr + CSR5);
                        if (tp->flags & HAS_INTR_MITIGATION) {
                                /* Josip Loncaric at ICASE did extensive experimentation
                                   to develop a good interrupt mitigation setting. */
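                                /* Decoded against the mit_table field map at
                                 * the top of this file (a sketch, not from the
                                 * datasheet): 0x8b240000 = cycle-size bit set,
                                 * TX timer 1, TX pkts 3, RX timer 2, RX pkts 2,
                                 * continuous mode off. */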
                                iowrite32(0x8b240000, ioaddr + CSR11);
                        } else if (tp->chip_id == LC82C168) {
                                /* the LC82C168 doesn't have a hw timer. */
                                iowrite32(0x00, ioaddr + CSR7);
                                mod_timer(&tp->timer, RUN_AT(HZ/50));
                        } else {
                                /* Mask all interrupting sources, set timer to
                                   re-enable. */
                                iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt,
                                          ioaddr + CSR7);
                                iowrite32(0x0012, ioaddr + CSR11);
                        }
                        break;
                }

                work_count--;
                if (work_count == 0)
                        break;

                csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
                if (rxd)
                        csr5 &= ~RxPollInt;
        } while ((csr5 & (TxNoBuf |
                          TxDied |
                          TxIntr |
                          TimerInt |
                          /* Abnormal intr. */
                          RxDied |
                          TxFIFOUnderflow |
                          TxJabber |
                          TPLnkFail |
                          SystemError)) != 0);
#else
        } while ((csr5 & (NormalIntr | AbnormalIntr)) != 0);

        tulip_refill_rx(dev);

        /* check if the card is in suspend mode */
        entry = tp->dirty_rx % RX_RING_SIZE;
        if (tp->rx_buffers[entry].skb == NULL) {
                if (tulip_debug > 1)
                        dev_warn(&dev->dev,
                                 "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
                                 tp->nir, tp->cur_rx, tp->ttimer, rx);
                if (tp->chip_id == LC82C168) {
                        iowrite32(0x00, ioaddr + CSR7);
                        mod_timer(&tp->timer, RUN_AT(HZ/50));
                } else {
                        if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
                                if (tulip_debug > 1)
                                        dev_warn(&dev->dev,
                                                 "in rx suspend mode: (%lu) set timer\n",
                                                 tp->nir);
                                iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
                                          ioaddr + CSR7);
                                iowrite32(TimerInt, ioaddr + CSR5);
                                iowrite32(12, ioaddr + CSR11);
                                tp->ttimer = 1;
                        }
                }
        }
#endif /* CONFIG_TULIP_NAPI */

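        /* CSR8 is the missed-frames counter: per the 21143-style layout this
         * driver assumes, bits 15:0 count frames missed for lack of buffers
         * and bit 16 (0x10000) flags counter overflow, hence the saturation
         * below. */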
        if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
                dev->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
        }

        if (tulip_debug > 4)
                netdev_dbg(dev, "exiting interrupt, csr5=%#8.8x\n",
                           ioread32(ioaddr + CSR5));

        return IRQ_HANDLED;
}