/*
	drivers/net/tulip/interrupt.c

	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
	for more information on this driver.
	Please submit bugs to http://bugzilla.kernel.org/.

*/

#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>

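/* Tunables consumed in this file; presumably exposed as module
 * parameters elsewhere in the driver (an assumption, not visible here):
 * tulip_rx_copybreak - received packets shorter than this are copied
 *   into a freshly allocated skb instead of passing up the ring buffer;
 * tulip_max_interrupt_work - cap on loop iterations per interrupt. */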
int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
        /*  CSR11 21143 hardware Mitigation Control Interrupt
            We use only RX mitigation; other techniques are used for
            TX intr. mitigation.

           31    Cycle Size (timer control)
           30:27 TX timer in 16 * Cycle size
           26:24 TX No pkts before Int.
           23:20 RX timer in Cycle size
           19:17 RX No pkts before Int.
           16    Continuous Mode (CM)
        */

        0x0,             /* IM disabled */
        0x80150000,      /* RX time = 1, RX pkts = 2, CM = 1 */
        0x80150000,
        0x80270000,
        0x80370000,
        0x80490000,
        0x80590000,
        0x80690000,
        0x807B0000,
        0x808B0000,
        0x809D0000,
        0x80AD0000,
        0x80BD0000,
        0x80CF0000,
        0x80DF0000,
//      0x80FF0000       /* RX time = 16, RX pkts = 7, CM = 1 */
        0x80F10000       /* RX time = 16, RX pkts = 0, CM = 1 */
};
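
/* Worked example (added annotation, using the field layout above):
 * 0x80270000 = bit 31 (cycle size) | RX timer 2 (bits 23:20)
 *            | RX pkts 3 (bits 19:17) | CM (bit 16),
 * i.e. interrupt after 3 packets or 2 timer cycles, whichever first. */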
#endif


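/* Added annotation on the ring bookkeeping: cur_rx counts descriptors
 * consumed by the receive path and dirty_rx counts descriptors refilled
 * for the chip; entries in [dirty_rx, cur_rx) still need a fresh skb,
 * and the return value is how many buffers were actually replenished. */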
int tulip_refill_rx(struct net_device *dev)
{
        struct tulip_private *tp = netdev_priv(dev);
        int entry;
        int refilled = 0;

        /* Refill the Rx ring buffers. */
        for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
                entry = tp->dirty_rx % RX_RING_SIZE;
                if (tp->rx_buffers[entry].skb == NULL) {
                        struct sk_buff *skb;
                        dma_addr_t mapping;

                        skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
                        if (skb == NULL)
                                break;

                        mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
                                                 PCI_DMA_FROMDEVICE);
                        tp->rx_buffers[entry].mapping = mapping;

                        skb->dev = dev;                 /* Mark as being used by this device. */
                        tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
                        refilled++;
                }
                tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
        }
        if (tp->chip_id == LC82C168) {
                if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
                        /* Rx stopped due to out of buffers,
                         * restart it
                         */
                        iowrite32(0x01, tp->base_addr + CSR2);
                }
        }
        return refilled;
}

#ifdef CONFIG_TULIP_NAPI

void oom_timer(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct tulip_private *tp = netdev_priv(dev);
        netif_rx_schedule(&tp->napi);
}

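/* Added annotation: NAPI poll callback. 'budget' bounds how many
 * packets may be delivered in one pass; returning a value smaller
 * than budget means we are done, in which case this function removes
 * itself from the poll list and re-enables RX interrupts below. */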
int tulip_poll(struct napi_struct *napi, int budget)
{
        struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
        struct net_device *dev = tp->dev;
        int entry = tp->cur_rx % RX_RING_SIZE;
        int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
        int received = 0;
#endif

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

/* One buffer is needed for mit activation; or it might be a
   bug in the ring buffer code; check later -- JHS */

        if (budget >= RX_RING_SIZE)
                budget--;
#endif

        if (tulip_debug > 4)
                printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
                       tp->rx_ring[entry].status);

        do {
                if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
                        printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
                        break;
                }
                /* Acknowledge current RX interrupt sources. */
                iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);


                /* If we own the next entry, it is a new packet. Send it up. */
                while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                        s32 status = le32_to_cpu(tp->rx_ring[entry].status);

                        if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
                                break;

                        if (tulip_debug > 5)
                                printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
                                       dev->name, entry, status);

                        if (++work_done >= budget)
                                goto not_done;

                        if ((status & 0x38008300) != 0x0300) {
                                if ((status & 0x38000300) != 0x0300) {
                                        /* Ignore earlier buffers. */
                                        if ((status & 0xffff) != 0x7fff) {
                                                if (tulip_debug > 1)
                                                        printk(KERN_WARNING "%s: Oversized Ethernet frame "
                                                               "spanned multiple buffers, status %8.8x!\n",
                                                               dev->name, status);
                                                tp->stats.rx_length_errors++;
                                        }
                                } else if (status & RxDescFatalErr) {
                                        /* There was a fatal error. */
                                        if (tulip_debug > 2)
                                                printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
                                                       dev->name, status);
                                        tp->stats.rx_errors++; /* end of a packet.*/
                                        if (status & 0x0890) tp->stats.rx_length_errors++;
                                        if (status & 0x0004) tp->stats.rx_frame_errors++;
                                        if (status & 0x0002) tp->stats.rx_crc_errors++;
                                        if (status & 0x0001) tp->stats.rx_fifo_errors++;
                                }
                        } else {
                                /* Omit the four octet CRC from the length. */
                                short pkt_len = ((status >> 16) & 0x7ff) - 4;
                                struct sk_buff *skb;

#ifndef final_version
                                if (pkt_len > 1518) {
                                        printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
                                               dev->name, pkt_len, pkt_len);
                                        pkt_len = 1518;
                                        tp->stats.rx_length_errors++;
                                }
#endif
                                /* Check if the packet is long enough to accept without copying
                                   to a minimally-sized skbuff. */
                                if (pkt_len < tulip_rx_copybreak
                                    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                        skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                        pci_dma_sync_single_for_cpu(tp->pdev,
                                                                    tp->rx_buffers[entry].mapping,
                                                                    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
                                        skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
                                                                pkt_len);
                                        skb_put(skb, pkt_len);
#else
                                        memcpy(skb_put(skb, pkt_len),
                                               tp->rx_buffers[entry].skb->data,
                                               pkt_len);
#endif
                                        pci_dma_sync_single_for_device(tp->pdev,
                                                                       tp->rx_buffers[entry].mapping,
                                                                       pkt_len, PCI_DMA_FROMDEVICE);
                                } else {        /* Pass up the skb already on the Rx ring. */
                                        char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                             pkt_len);

#ifndef final_version
                                        if (tp->rx_buffers[entry].mapping !=
                                            le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                                                printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
                                                       "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
                                                       dev->name,
                                                       le32_to_cpu(tp->rx_ring[entry].buffer1),
                                                       (unsigned long long)tp->rx_buffers[entry].mapping,
                                                       skb->head, temp);
                                        }
#endif

                                        pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                         PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                                        tp->rx_buffers[entry].skb = NULL;
                                        tp->rx_buffers[entry].mapping = 0;
                                }
                                skb->protocol = eth_type_trans(skb, dev);

                                netif_receive_skb(skb);

                                tp->stats.rx_packets++;
                                tp->stats.rx_bytes += pkt_len;
                        }
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
                        received++;
#endif

                        entry = (++tp->cur_rx) % RX_RING_SIZE;
                        if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
                                tulip_refill_rx(dev);

                }

                /* New ack strategy... the irq handler does not ack Rx any
                   longer; hopefully this helps */

                /* Really bad things can happen here... If a new packet arrives
                 * and an irq arrives (tx or just due to an occasionally unset
                 * mask), it will be acked by the irq handler, but a new thread
                 * is not scheduled. It is a major hole in the design.
                 * No idea how to fix this if "playing with fire" fails
                 * tomorrow (night 011029). If it does not fail, we have won
                 * finally: the amount of IO did not increase at all. */
        } while ((ioread32(tp->base_addr + CSR5) & RxIntr));

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

        /* We use this simplistic scheme for IM. It's proven by
           real life installations. We could have IM enabled
           continuously but this would cause unnecessary latency.
           Unfortunately we can't use all the NET_RX_* feedback here.
           It would turn on IM for devices that are not contributing
           to backlog congestion, with unnecessary latency.

           We monitor the device RX-ring and have:

           HW Interrupt Mitigation either ON or OFF.

           ON:  More than 1 pkt received (per intr.) OR we are dropping
           OFF: Only 1 pkt received

           Note: we only use the min and max (0, 15) settings from mit_table */

        if (tp->flags & HAS_INTR_MITIGATION) {
                if (received > 1) {
                        if (!tp->mit_on) {
                                tp->mit_on = 1;
                                iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
                        }
                } else {
                        if (tp->mit_on) {
                                tp->mit_on = 0;
                                iowrite32(0, tp->base_addr + CSR11);
                        }
                }
        }

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

        tulip_refill_rx(dev);

        /* If the RX ring is not full, allocation failed: we are out of memory. */
        if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                goto oom;

        /* Remove us from the polling list and enable RX intr. */

        netif_rx_complete(napi);
        iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);

        /* The last op happens after poll completion. Which means the following:
         * 1. it can race with disabling irqs in the irq handler
         * 2. it can race with dis-/enabling irqs in other poll threads
         * 3. if an irq is raised after the loop begins, it will be immediately
         *    triggered here.
         *
         * Summarizing: the logic results in some redundant irqs both
         * due to races in masking and due to too late acking of already
         * processed irqs. But it must not result in losing events.
         */

        return work_done;

not_done:
        if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
            tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                tulip_refill_rx(dev);

        if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                goto oom;

        return work_done;

oom:    /* Executed with RX ints disabled */

        /* Start timer, stop polling, but do not enable rx interrupts. */
        mod_timer(&tp->oom_timer, jiffies+1);

        /* Think: timer_pending() was an explicit signature of a bug.
         * The timer can be pending now, but it may have fired and completed
         * before we did netif_rx_complete(). See? We would lose it. */

        /* remove ourselves from the polling list */
        netif_rx_complete(napi);

        return work_done;
}

#else /* CONFIG_TULIP_NAPI */

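/* Added annotation: this is the non-NAPI receive path, called
 * synchronously from tulip_interrupt below, so packets go up via
 * netif_rx() rather than netif_receive_skb(), and the work limit
 * comes from the ring occupancy instead of a NAPI budget. */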
static int tulip_rx(struct net_device *dev)
{
        struct tulip_private *tp = netdev_priv(dev);
        int entry = tp->cur_rx % RX_RING_SIZE;
        int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
        int received = 0;

        if (tulip_debug > 4)
                printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
                       tp->rx_ring[entry].status);
        /* If we own the next entry, it is a new packet. Send it up. */
        while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                s32 status = le32_to_cpu(tp->rx_ring[entry].status);

                if (tulip_debug > 5)
                        printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
                               dev->name, entry, status);
                if (--rx_work_limit < 0)
                        break;
                if ((status & 0x38008300) != 0x0300) {
                        if ((status & 0x38000300) != 0x0300) {
                                /* Ignore earlier buffers. */
                                if ((status & 0xffff) != 0x7fff) {
                                        if (tulip_debug > 1)
                                                printk(KERN_WARNING "%s: Oversized Ethernet frame "
                                                       "spanned multiple buffers, status %8.8x!\n",
                                                       dev->name, status);
                                        tp->stats.rx_length_errors++;
                                }
                        } else if (status & RxDescFatalErr) {
                                /* There was a fatal error. */
                                if (tulip_debug > 2)
                                        printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
                                               dev->name, status);
                                tp->stats.rx_errors++; /* end of a packet.*/
                                if (status & 0x0890) tp->stats.rx_length_errors++;
                                if (status & 0x0004) tp->stats.rx_frame_errors++;
                                if (status & 0x0002) tp->stats.rx_crc_errors++;
                                if (status & 0x0001) tp->stats.rx_fifo_errors++;
                        }
                } else {
                        /* Omit the four octet CRC from the length. */
                        short pkt_len = ((status >> 16) & 0x7ff) - 4;
                        struct sk_buff *skb;

#ifndef final_version
                        if (pkt_len > 1518) {
                                printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
                                       dev->name, pkt_len, pkt_len);
                                pkt_len = 1518;
                                tp->stats.rx_length_errors++;
                        }
#endif

                        /* Check if the packet is long enough to accept without copying
                           to a minimally-sized skbuff. */
                        if (pkt_len < tulip_rx_copybreak
                            && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                pci_dma_sync_single_for_cpu(tp->pdev,
                                                            tp->rx_buffers[entry].mapping,
                                                            pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
                                skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
                                                        pkt_len);
                                skb_put(skb, pkt_len);
#else
                                memcpy(skb_put(skb, pkt_len),
                                       tp->rx_buffers[entry].skb->data,
                                       pkt_len);
#endif
                                pci_dma_sync_single_for_device(tp->pdev,
                                                               tp->rx_buffers[entry].mapping,
                                                               pkt_len, PCI_DMA_FROMDEVICE);
                        } else {        /* Pass up the skb already on the Rx ring. */
                                char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                     pkt_len);

#ifndef final_version
                                if (tp->rx_buffers[entry].mapping !=
                                    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                                        printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
                                               "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
                                               dev->name,
                                               le32_to_cpu(tp->rx_ring[entry].buffer1),
                                               (unsigned long long)tp->rx_buffers[entry].mapping,
                                               skb->head, temp);
                                }
#endif

                                pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                                tp->rx_buffers[entry].skb = NULL;
                                tp->rx_buffers[entry].mapping = 0;
                        }
                        skb->protocol = eth_type_trans(skb, dev);

                        netif_rx(skb);

                        tp->stats.rx_packets++;
                        tp->stats.rx_bytes += pkt_len;
                }
                received++;
                entry = (++tp->cur_rx) % RX_RING_SIZE;
        }
        return received;
}
#endif  /* CONFIG_TULIP_NAPI */

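/* Added annotation: only hppa platforms surface PHY link-change
 * interrupts through the CSR12 GPIO bits handled below; elsewhere this
 * helper compiles down to "return 0". Its result feeds the 'handled'
 * flag in tulip_interrupt. */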
static inline unsigned int phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
        struct tulip_private *tp = netdev_priv(dev);
        int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

        if (csr12 != tp->csr12_shadow) {
                /* ack interrupt */
                iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
                tp->csr12_shadow = csr12;
                /* do link change stuff */
                spin_lock(&tp->lock);
                tulip_check_duplex(dev);
                spin_unlock(&tp->lock);
                /* clear irq ack bit */
                iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

                return 1;
        }
#endif

        return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
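/* Added annotation: the PCI interrupt line may be shared, so when CSR5
 * shows neither summary bit we return IRQ_RETVAL(handled) and let the
 * kernel offer the interrupt to the other handlers on the line. */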
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = (struct net_device *)dev_instance;
        struct tulip_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->base_addr;
        int csr5;
        int missed;
        int rx = 0;
        int tx = 0;
        int oi = 0;
        int maxrx = RX_RING_SIZE;
        int maxtx = TX_RING_SIZE;
        int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
        int rxd = 0;
#else
        int entry;
#endif
        unsigned int work_count = tulip_max_interrupt_work;
        unsigned int handled = 0;

        /* Let's see whether the interrupt really is for us */
        csr5 = ioread32(ioaddr + CSR5);

        if (tp->flags & HAS_PHY_IRQ)
                handled = phy_interrupt (dev);

        if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
                return IRQ_RETVAL(handled);

        tp->nir++;

        do {

#ifdef CONFIG_TULIP_NAPI

                if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
                        rxd++;
                        /* Mask RX intrs and add the device to poll list. */
                        iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
                        netif_rx_schedule(&tp->napi);

                        if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
                                break;
                }

                /* Acknowledge the interrupt sources we handle here ASAP;
                   the poll function does the Rx and RxNoBuf acking */

                iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
                /* Acknowledge all of the current interrupt sources ASAP. */
                iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);


                if (csr5 & (RxIntr | RxNoBuf)) {
                        rx += tulip_rx(dev);
                        tulip_refill_rx(dev);
                }

#endif /*  CONFIG_TULIP_NAPI */

                if (tulip_debug > 4)
                        printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
                               dev->name, csr5, ioread32(ioaddr + CSR5));

                if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
                        unsigned int dirty_tx;

                        spin_lock(&tp->lock);

                        for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
                             dirty_tx++) {
                                int entry = dirty_tx % TX_RING_SIZE;
                                int status = le32_to_cpu(tp->tx_ring[entry].status);

                                if (status < 0)
                                        break;                  /* It still has not been Txed */

                                /* Check for Rx filter setup frames. */
                                if (tp->tx_buffers[entry].skb == NULL) {
                                        /* test because dummy frames not mapped */
                                        if (tp->tx_buffers[entry].mapping)
                                                pci_unmap_single(tp->pdev,
                                                                 tp->tx_buffers[entry].mapping,
                                                                 sizeof(tp->setup_frame),
                                                                 PCI_DMA_TODEVICE);
                                        continue;
                                }

                                if (status & 0x8000) {
                                        /* There was a major error, log it. */
#ifndef final_version
                                        if (tulip_debug > 1)
                                                printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
                                                       dev->name, status);
#endif
                                        tp->stats.tx_errors++;
                                        if (status & 0x4104) tp->stats.tx_aborted_errors++;
                                        if (status & 0x0C00) tp->stats.tx_carrier_errors++;
                                        if (status & 0x0200) tp->stats.tx_window_errors++;
                                        if (status & 0x0002) tp->stats.tx_fifo_errors++;
                                        if ((status & 0x0080) && tp->full_duplex == 0)
                                                tp->stats.tx_heartbeat_errors++;
                                } else {
                                        tp->stats.tx_bytes +=
                                                tp->tx_buffers[entry].skb->len;
                                        tp->stats.collisions += (status >> 3) & 15;
                                        tp->stats.tx_packets++;
                                }

                                pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
                                                 tp->tx_buffers[entry].skb->len,
                                                 PCI_DMA_TODEVICE);

                                /* Free the original skb. */
                                dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
                                tp->tx_buffers[entry].skb = NULL;
                                tp->tx_buffers[entry].mapping = 0;
                                tx++;
                        }

#ifndef final_version
                        if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
                                printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
                                       dev->name, dirty_tx, tp->cur_tx);
                                dirty_tx += TX_RING_SIZE;
                        }
#endif

                        if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
                                netif_wake_queue(dev);

                        tp->dirty_tx = dirty_tx;
                        if (csr5 & TxDied) {
                                if (tulip_debug > 2)
                                        printk(KERN_WARNING "%s: The transmitter stopped."
                                               "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
                                               dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
                                tulip_restart_rxtx(tp);
                        }
                        spin_unlock(&tp->lock);
                }

                /* Log errors. */
                if (csr5 & AbnormalIntr) {      /* Abnormal error summary bit. */
                        if (csr5 == 0xffffffff)
                                break;
                        if (csr5 & TxJabber) tp->stats.tx_errors++;
                        if (csr5 & TxFIFOUnderflow) {
                                if ((tp->csr6 & 0xC000) != 0xC000)
                                        tp->csr6 += 0x4000;     /* Bump up the Tx threshold */
                                else
                                        tp->csr6 |= 0x00200000;  /* Store-n-forward. */
                                /* Restart the transmit process. */
                                tulip_restart_rxtx(tp);
                                iowrite32(0, ioaddr + CSR1);
                        }
                        if (csr5 & (RxDied | RxNoBuf)) {
                                if (tp->flags & COMET_MAC_ADDR) {
                                        iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
                                        iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
                                }
                        }
                        if (csr5 & RxDied) {            /* Missed a Rx frame. */
                                tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
                                tp->stats.rx_errors++;
                                tulip_start_rxtx(tp);
                        }
                        /*
                         * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
                         * call is ever done under the spinlock
                         */
                        if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
                                if (tp->link_change)
                                        (tp->link_change)(dev, csr5);
                        }
                        if (csr5 & SystemError) {
                                int error = (csr5 >> 23) & 7;
                                /* oops, we hit a PCI error.  The code produced corresponds
                                 * to the reason:
                                 *  0 - parity error
                                 *  1 - master abort
                                 *  2 - target abort
                                 * Note that on parity error, we should do a software reset
                                 * of the chip to get it back into a sane state (according
                                 * to the 21142/3 docs that is).
                                 *   -- rmk
                                 */
                                printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
                                       dev->name, tp->nir, error);
                        }
                        /* Clear all error sources, including undocumented ones! */
                        iowrite32(0x0800f7ba, ioaddr + CSR5);
                        oi++;
                }
                if (csr5 & TimerInt) {

                        if (tulip_debug > 2)
                                printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
                                       dev->name, csr5);
                        iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
                        tp->ttimer = 0;
                        oi++;
                }
                if (tx > maxtx || rx > maxrx || oi > maxoi) {
                        if (tulip_debug > 1)
                                printk(KERN_WARNING "%s: Too much work during an interrupt, "
                                       "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);

                        /* Acknowledge all interrupt sources. */
                        iowrite32(0x8001ffff, ioaddr + CSR5);
                        if (tp->flags & HAS_INTR_MITIGATION) {
                                /* Josip Loncaric at ICASE did extensive experimentation
                                   to develop a good interrupt mitigation setting. */
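                                /* Added annotation: per the CSR11 layout in the
                                 * mitigation table above, 0x8b240000 = cycle-size
                                 * bit 31 | TX timer 1 (x16 cycles) | TX pkts 3 |
                                 * RX timer 2 | RX pkts 2, continuous mode off. */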
                                iowrite32(0x8b240000, ioaddr + CSR11);
                        } else if (tp->chip_id == LC82C168) {
                                /* the LC82C168 doesn't have a hw timer. */
                                iowrite32(0x00, ioaddr + CSR7);
                                mod_timer(&tp->timer, RUN_AT(HZ/50));
                        } else {
                                /* Mask all interrupting sources, set timer to
                                   re-enable. */
                                iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
                                iowrite32(0x0012, ioaddr + CSR11);
                        }
                        break;
                }

                work_count--;
                if (work_count == 0)
                        break;

                csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
                if (rxd)
                        csr5 &= ~RxPollInt;
        } while ((csr5 & (TxNoBuf |
                          TxDied |
                          TxIntr |
                          TimerInt |
                          /* Abnormal intr. */
                          RxDied |
                          TxFIFOUnderflow |
                          TxJabber |
                          TPLnkFail |
                          SystemError )) != 0);
#else
        } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

        tulip_refill_rx(dev);

        /* check if the card is in suspend mode */
        entry = tp->dirty_rx % RX_RING_SIZE;
        if (tp->rx_buffers[entry].skb == NULL) {
                if (tulip_debug > 1)
                        printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
                               dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
                if (tp->chip_id == LC82C168) {
                        iowrite32(0x00, ioaddr + CSR7);
                        mod_timer(&tp->timer, RUN_AT(HZ/50));
                } else {
                        if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
                                if (tulip_debug > 1)
                                        printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n",
                                               dev->name, tp->nir);
                                iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
                                          ioaddr + CSR7);
                                iowrite32(TimerInt, ioaddr + CSR5);
                                iowrite32(12, ioaddr + CSR11);
                                tp->ttimer = 1;
                        }
                }
        }
#endif /* CONFIG_TULIP_NAPI */

        if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
                tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
        }

        if (tulip_debug > 4)
                printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
                       dev->name, ioread32(ioaddr + CSR5));

        return IRQ_HANDLED;
}