/*
	drivers/net/tulip/interrupt.c

	Maintained by Valerie Henson <val_henson@linux.intel.com>
	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
	for more information on this driver, or visit the project
	Web page at http://sourceforge.net/projects/tulip/

*/

#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>

int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
	/*  CSR11: 21143 hardware Mitigation Control Interrupt.
	    We use only RX mitigation; other techniques are used for
	    TX interrupt mitigation.

	   31    Cycle Size (timer control)
	   30:27 TX timer in 16 * Cycle size
	   26:24 TX No pkts before Int.
	   23:20 RX timer in Cycle size
	   19:17 RX No pkts before Int.
	   16    Continuous Mode (CM)
	*/

	0x0,             /* IM disabled */
	0x80150000,      /* RX time = 1, RX pkts = 2, CM = 1 */
	0x80150000,
	0x80270000,
	0x80370000,
	0x80490000,
	0x80590000,
	0x80690000,
	0x807B0000,
	0x808B0000,
	0x809D0000,
	0x80AD0000,
	0x80BD0000,
	0x80CF0000,
	0x80DF0000,
//	0x80FF0000      /* RX time = 16, RX pkts = 7, CM = 1 */
	0x80F10000      /* RX time = 16, RX pkts = 0, CM = 1 */
};
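
/* For illustration only (not used by the driver): a hypothetical helper
 * showing how the table entries above decompose into the CSR11 fields
 * described in the comment, e.g. MIT_CSR11(1, 2) == 0x80150000
 * (RX time = 1, RX pkts = 2, CM = 1). */
#define MIT_CSR11(rx_timer, rx_pkts)				\
	(0x80000000 /* cycle size */ | 0x00010000 /* CM */ |	\
	 (((rx_timer) & 0xf) << 20) | (((rx_pkts) & 0x7) << 17))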
#endif


int tulip_refill_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	int refilled = 0;

	/* Refill the Rx ring buffers. */
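	/* (cur_rx and dirty_rx are free-running indices: entries in
	 * [dirty_rx, cur_rx) have been consumed by the RX path and need a
	 * fresh skb before ownership is handed back to the chip.) */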
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
			if (skb == NULL)
				break;

			mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
						 PCI_DMA_FROMDEVICE);
			tp->rx_buffers[entry].mapping = mapping;

			skb->dev = dev;			/* Mark as being used by this device. */
			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
			refilled++;
		}
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}
	if (tp->chip_id == LC82C168) {
		if (((ioread32(tp->base_addr + CSR5)>>17)&0x07) == 4) {
			/* Rx stopped due to out of buffers,
			 * restart it
			 */
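			/* (CSR5<19:17> is the receive process state; state 4
			 * presumably means "suspended, no receive buffer".
			 * Writing CSR2, the receive poll demand register,
			 * kicks the stalled receive engine.) */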
			iowrite32(0x01, tp->base_addr + CSR2);
		}
	}
	return refilled;
}

#ifdef CONFIG_TULIP_NAPI
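/* Invoked from tp->oom_timer (armed in the oom path below): put the
 * device back on the poll list so tulip_poll() can retry the RX refill
 * once memory may be available again. */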
void oom_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	netif_rx_schedule(dev);
}

int tulip_poll(struct net_device *dev, int *budget)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = *budget;
	int received = 0;

	if (!netif_running(dev))
		goto done;

	if (rx_work_limit > dev->quota)
		rx_work_limit = dev->quota;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

/* One buffer is needed for mit activation; or it might be a
   bug in the ring buffer code; check later -- JHS */

	if (rx_work_limit >= RX_RING_SIZE)
		rx_work_limit--;
#endif

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_poll(), entry %d %8.8x.\n", entry,
		       tp->rx_ring[entry].status);

	do {
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
			break;
		}
		/* Acknowledge current RX interrupt sources. */
		iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);

		/* If we own the next entry, it is a new packet. Send it up. */
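		/* (DescOwned is the ownership bit in the descriptor status
		 * word: while it is set the descriptor belongs to the chip,
		 * which clears it once it has filled the buffer.) */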
		while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
			s32 status = le32_to_cpu(tp->rx_ring[entry].status);

			if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
				break;

			if (tulip_debug > 5)
				printk(KERN_DEBUG "%s: In tulip_poll(), entry %d %8.8x.\n",
				       dev->name, entry, status);
			if (--rx_work_limit < 0)
				goto not_done;

			if ((status & 0x38008300) != 0x0300) {
				if ((status & 0x38000300) != 0x0300) {
					/* Ignore earlier buffers. */
					if ((status & 0xffff) != 0x7fff) {
						if (tulip_debug > 1)
							printk(KERN_WARNING "%s: Oversized Ethernet frame "
							       "spanned multiple buffers, status %8.8x!\n",
							       dev->name, status);
						tp->stats.rx_length_errors++;
					}
				} else if (status & RxDescFatalErr) {
					/* There was a fatal error. */
					if (tulip_debug > 2)
						printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						       dev->name, status);
					tp->stats.rx_errors++; /* end of a packet.*/
					if (status & 0x0890) tp->stats.rx_length_errors++;
					if (status & 0x0004) tp->stats.rx_frame_errors++;
					if (status & 0x0002) tp->stats.rx_crc_errors++;
					if (status & 0x0001) tp->stats.rx_fifo_errors++;
				}
			} else {
				/* Omit the four octet CRC from the length. */
				short pkt_len = ((status >> 16) & 0x7ff) - 4;
				struct sk_buff *skb;

#ifndef final_version
				if (pkt_len > 1518) {
					printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					       dev->name, pkt_len, pkt_len);
					pkt_len = 1518;
					tp->stats.rx_length_errors++;
				}
#endif
				/* Check if the packet is long enough to accept without copying
				   to a minimally-sized skbuff. */
				if (pkt_len < tulip_rx_copybreak
				    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
					skb->dev = dev;
					skb_reserve(skb, 2);	/* 16 byte align the IP header */
					pci_dma_sync_single_for_cpu(tp->pdev,
								    tp->rx_buffers[entry].mapping,
								    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
					eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
							 pkt_len, 0);
					skb_put(skb, pkt_len);
#else
					memcpy(skb_put(skb, pkt_len),
					       tp->rx_buffers[entry].skb->data,
					       pkt_len);
#endif
					pci_dma_sync_single_for_device(tp->pdev,
								       tp->rx_buffers[entry].mapping,
								       pkt_len, PCI_DMA_FROMDEVICE);
				} else {	/* Pass up the skb already on the Rx ring. */
					char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
							     pkt_len);

#ifndef final_version
					if (tp->rx_buffers[entry].mapping !=
					    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
						printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
						       "do not match in tulip_poll: %08x vs. %08llx %p / %p.\n",
						       dev->name,
						       le32_to_cpu(tp->rx_ring[entry].buffer1),
						       (unsigned long long)tp->rx_buffers[entry].mapping,
						       skb->head, temp);
					}
#endif

					pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
							 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

					tp->rx_buffers[entry].skb = NULL;
					tp->rx_buffers[entry].mapping = 0;
				}
				skb->protocol = eth_type_trans(skb, dev);

				netif_receive_skb(skb);

				dev->last_rx = jiffies;
				tp->stats.rx_packets++;
				tp->stats.rx_bytes += pkt_len;
			}
			received++;

			entry = (++tp->cur_rx) % RX_RING_SIZE;
			if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
				tulip_refill_rx(dev);

		}

		/* New ack strategy... the irq no longer acks Rx;
		   hopefully this helps */

		/* Really bad things can happen here... If a new packet arrives
		 * and an irq arrives (tx or just due to an occasionally unset
		 * mask), it will be acked by the irq handler, but the new
		 * thread is not scheduled. It is a major hole in the design.
		 * No idea how to fix this if "playing with fire" fails
		 * tomorrow (night 011029). If it does not fail, we have
		 * finally won: the amount of IO did not increase at all. */
	} while ((ioread32(tp->base_addr + CSR5) & RxIntr));
done:

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* We use this simplistic scheme for IM. It's proven by
	   real life installations. We could have IM enabled
	   continuously, but this would cause unnecessary latency.
	   Unfortunately we can't use all the NET_RX_* feedback here.
	   That would turn on IM for devices that are not contributing
	   to backlog congestion, with unnecessary latency.

	   We monitor the device RX-ring and have:

	   HW Interrupt Mitigation either ON or OFF.

	   ON:  More than 1 pkt received (per intr.) OR we are dropping
	   OFF: Only 1 pkt received

	   Note: We only use the min and max (0, 15) settings from mit_table */

	if (tp->flags & HAS_INTR_MITIGATION) {
		if (received > 1) {
			if (!tp->mit_on) {
				tp->mit_on = 1;
				iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
			}
		} else {
			if (tp->mit_on) {
				tp->mit_on = 0;
				iowrite32(0, tp->base_addr + CSR11);
			}
		}
	}

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

	dev->quota -= received;
	*budget -= received;
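	/* (Old-style NAPI contract, pre-2.6.24: decrement both dev->quota
	 * and *budget by the amount of work done; return 0 when RX is fully
	 * processed and interrupts are re-enabled, or 1 to stay on the
	 * poll list.) */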
	tulip_refill_rx(dev);

	/* If the RX ring is not full then we are out of memory. */
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	/* Remove us from the polling list and enable RX intr. */

	netif_rx_complete(dev);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);

	/* The last op happens after poll completion. Which means the following:
	 * 1. it can race with disabling irqs in the irq handler
	 * 2. it can race with dis/enabling irqs in other poll threads
	 * 3. if an irq was raised after the beginning of the loop, it will be
	 *    immediately triggered here.
	 *
	 * Summarizing: the logic results in some redundant irqs both
	 * due to races in masking and due to too late acking of already
	 * processed irqs. But it must not result in losing events.
	 */

	return 0;
not_done:
	if (!received) {
		received = dev->quota; /* Should not happen */
	}
	dev->quota -= received;
	*budget -= received;

	if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
	    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		tulip_refill_rx(dev);

	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	return 1;


oom:	/* Executed with RX ints disabled */

	/* Start timer, stop polling, but do not enable rx interrupts. */
	mod_timer(&tp->oom_timer, jiffies+1);

	/* Think: timer_pending() was an explicit signature of a bug.
	 * The timer can be pending now, but have fired and completed
	 * before we did netif_rx_complete(). See? We would lose it. */

	/* remove ourselves from the polling list */
	netif_rx_complete(dev);

	return 0;
}
#else /* CONFIG_TULIP_NAPI */

static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
		       tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);

		if (tulip_debug > 5)
			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
			       dev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
						       "spanned multiple buffers, status %8.8x!\n",
						       dev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, status);
				tp->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct sk_buff *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
				       dev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak
			    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(tp->pdev,
							    tp->rx_buffers[entry].mapping,
							    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
						 pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->data,
				       pkt_len);
#endif
				pci_dma_sync_single_for_device(tp->pdev,
							       tp->rx_buffers[entry].mapping,
							       pkt_len, PCI_DMA_FROMDEVICE);
			} else {	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
					       dev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       (long long)tp->rx_buffers[entry].mapping,
					       skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			dev->last_rx = jiffies;
			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
#endif  /* CONFIG_TULIP_NAPI */

static inline unsigned int phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
	struct tulip_private *tp = netdev_priv(dev);
	int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

	if (csr12 != tp->csr12_shadow) {
		/* ack interrupt */
		iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
		tp->csr12_shadow = csr12;
		/* do link change stuff */
		spin_lock(&tp->lock);
		tulip_check_duplex(dev);
		spin_unlock(&tp->lock);
		/* clear irq ack bit */
		iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

		return 1;
	}
#endif

	return 0;
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;

	/* Let's see whether the interrupt really is for us */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt (dev);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

	do {
#ifdef CONFIG_TULIP_NAPI

		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs and add the device to poll list. */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
			netif_rx_schedule(dev);

			if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
				break;
		}

		/* Acknowledge the interrupt sources we handle here ASAP;
		   the poll function does the Rx and RxNoBuf acking. */
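		/* (0x0001ff3f is 0x0001ffff with the RxIntr and RxNoBuf
		 * bits cleared, so those two sources are left for
		 * tulip_poll() to acknowledge.) */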
		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

#endif /*  CONFIG_TULIP_NAPI */
		if (tulip_debug > 4)
			printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
			       dev->name, csr5, ioread32(ioaddr + CSR5));

		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;			/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
							 tp->tx_buffers[entry].mapping,
							 sizeof(tp->setup_frame),
							 PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was a major error, log it. */
#ifndef final_version
					if (tulip_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
						       dev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
				       dev->name, dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					printk(KERN_WARNING "%s: The transmitter stopped."
					       "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
					       dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}
		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;  /* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				iowrite32(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
					iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {		/* Missed a Rx frame. */
				tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SytemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
				       dev->name, tp->nir, error);
			}
			/* Clear all error sources, including undocumented ones! */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {

			if (tulip_debug > 2)
				printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
				       dev->name, csr5);
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				printk(KERN_WARNING "%s: Too much work during an interrupt, "
				       "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n",
				       dev->name, csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
				/* Josip Loncaric at ICASE did extensive experimentation
				   to develop a good interrupt mitigation setting. */
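				/* (Decoded per the CSR11 field layout described
				 * above mit_table: 0x8b240000 sets cycle size,
				 * TX timer = 1, TX pkts = 3, RX timer = 2,
				 * RX pkts = 2, CM = 0.) */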
				iowrite32(0x8b240000, ioaddr + CSR11);
			} else if (tp->chip_id == LC82C168) {
				/* the LC82C168 doesn't have a hw timer. */
				iowrite32(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
				/* Mask all interrupting sources, set timer to
				   re-enable. */
				iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
				iowrite32(0x0012, ioaddr + CSR11);
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;

		csr5 = ioread32(ioaddr + CSR5);
#ifdef CONFIG_TULIP_NAPI
		if (rxd)
			csr5 &= ~RxPollInt;
	} while ((csr5 & (TxNoBuf |
			  TxDied |
			  TxIntr |
			  TimerInt |
			  /* Abnormal intr. */
			  RxDied |
			  TxFIFOUnderflow |
			  TxJabber |
			  TPLnkFail |
			  SytemError )) != 0);
#else
	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

	tulip_refill_rx(dev);

	/* check if the card is in suspend mode */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		if (tulip_debug > 1)
			printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
			       dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->chip_id == LC82C168) {
			iowrite32(0x00, ioaddr + CSR7);
			mod_timer(&tp->timer, RUN_AT(HZ/50));
		} else {
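			/* (On chips with a general-purpose timer, loading
			 * CSR11 makes a TimerInt fire later and re-enter this
			 * handler so the refill can be retried; presumably 12
			 * ticks is just an arbitrary short delay.) */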
			if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
				if (tulip_debug > 1)
					printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n",
					       dev->name, tp->nir);
				iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
					ioaddr + CSR7);
				iowrite32(TimerInt, ioaddr + CSR5);
				iowrite32(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}
#endif /* CONFIG_TULIP_NAPI */
	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
		tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}

	if (tulip_debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
		       dev->name, ioread32(ioaddr + CSR5));

	return IRQ_HANDLED;
}