typhoon: Add missing firmware copy.
[pandora-kernel.git] / drivers / net / typhoon.c
1 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
2 /*
3         Written 2002-2004 by David Dillow <dave@thedillows.org>
4         Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5         Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
6
7         This software may be used and distributed according to the terms of
8         the GNU General Public License (GPL), incorporated herein by reference.
9         Drivers based on or derived from this code fall under the GPL and must
10         retain the authorship, copyright and license notice.  This file is not
11         a complete program and may only be used when the entire operating
12         system is licensed under the GPL.
13
14         This software is available on a public web site. It may enable
15         cryptographic capabilities of the 3Com hardware, and may be
16         exported from the United States under License Exception "TSU"
17         pursuant to 15 C.F.R. Section 740.13(e).
18
19         This work was funded by the National Library of Medicine under
20         the Department of Energy project number 0274DD06D1 and NLM project
21         number Y1-LM-2015-01.
22
23         This driver is designed for the 3Com 3CR990 Family of cards with the
24         3XP Processor. It has been tested on x86 and sparc64.
25
26         KNOWN ISSUES:
27         *) The current firmware always strips the VLAN tag off, even if
28                 we tell it not to. You should filter VLANs at the switch
29                 as a workaround (good practice in any event) until we can
30                 get this fixed.
31         *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
32                 issue. Hopefully 3Com will fix it.
33         *) Waiting for a command response takes 8ms due to non-preemptable
34                 polling. Only significant for getting stats and creating
35                 SAs, but an ugly wart never the less.
36
37         TODO:
38         *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
39         *) Add more support for ethtool (especially for NIC stats)
40         *) Allow disabling of RX checksum offloading
41         *) Fix MAC changing to work while the interface is up
42                 (Need to put commands on the TX ring, which changes
43                 the locking)
44         *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
45                 http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
46 */
47
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 * Module tunable -- exposed via module_param() below.
 */
static int rx_copybreak = 200;

/* Should we use MMIO or Port IO?
 * 0: Port IO
 * 1: MMIO
 * 2: Try MMIO, fallback to Port IO
 * Module tunable -- exposed via module_param() below.
 */
static unsigned int use_mmio = 2;

/* end user-configurable values */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
 * NOTE(review): presumably the driver switches to all-multicast past this
 * limit -- confirm against the set_rx_mode implementation.
 */
static const int multicast_filter_limit = 32;
65
66 /* Operational parameters that are set at compile time. */
67
68 /* Keep the ring sizes a power of two for compile efficiency.
69  * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
70  * Making the Tx ring too large decreases the effectiveness of channel
71  * bonding and packet priority.
72  * There are no ill effects from too-large receive rings.
73  *
74  * We don't currently use the Hi Tx ring so, don't make it very big.
75  *
76  * Beware that if we start using the Hi Tx ring, we will need to change
77  * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
78  */
79 #define TXHI_ENTRIES            2
80 #define TXLO_ENTRIES            128
81 #define RX_ENTRIES              32
82 #define COMMAND_ENTRIES         16
83 #define RESPONSE_ENTRIES        32
84
85 #define COMMAND_RING_SIZE       (COMMAND_ENTRIES * sizeof(struct cmd_desc))
86 #define RESPONSE_RING_SIZE      (RESPONSE_ENTRIES * sizeof(struct resp_desc))
87
88 /* The 3XP will preload and remove 64 entries from the free buffer
89  * list, and we need one entry to keep the ring from wrapping, so
90  * to keep this a power of two, we use 128 entries.
91  */
92 #define RXFREE_ENTRIES          128
93 #define RXENT_ENTRIES           (RXFREE_ENTRIES - 1)
94
95 /* Operational parameters that usually are not changed. */
96
97 /* Time in jiffies before concluding the transmitter is hung. */
98 #define TX_TIMEOUT  (2*HZ)
99
100 #define PKT_BUF_SZ              1536
101
102 #define DRV_MODULE_NAME         "typhoon"
103 #define DRV_MODULE_VERSION      "1.5.8"
104 #define DRV_MODULE_RELDATE      "06/11/09"
105 #define PFX                     DRV_MODULE_NAME ": "
106 #define ERR_PFX                 KERN_ERR PFX
107
108 #include <linux/module.h>
109 #include <linux/kernel.h>
110 #include <linux/string.h>
111 #include <linux/timer.h>
112 #include <linux/errno.h>
113 #include <linux/ioport.h>
114 #include <linux/slab.h>
115 #include <linux/interrupt.h>
116 #include <linux/pci.h>
117 #include <linux/netdevice.h>
118 #include <linux/etherdevice.h>
119 #include <linux/skbuff.h>
120 #include <linux/mm.h>
121 #include <linux/init.h>
122 #include <linux/delay.h>
123 #include <linux/ethtool.h>
124 #include <linux/if_vlan.h>
125 #include <linux/crc32.h>
126 #include <linux/bitops.h>
127 #include <asm/processor.h>
128 #include <asm/io.h>
129 #include <asm/uaccess.h>
130 #include <linux/in6.h>
131 #include <linux/dma-mapping.h>
132 #include <linux/firmware.h>
133
134 #include "typhoon.h"
135
/* Banner printed once when the module initializes. */
static char version[] __devinitdata =
    "typhoon.c: version " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

/* Runtime firmware image, loaded from userspace via request_firmware(). */
#define FIRMWARE_NAME		"3com/typhoon.bin"
MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_NAME);
MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
                               "the buffer given back to the NIC. Default "
                               "is 200.");
MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
                           "Default is to try MMIO and fallback to PIO.");
module_param(rx_copybreak, int, 0);
module_param(use_mmio, int, 0);

/* The 3XP supports only 32 scatter-gather entries per TSO descriptor,
 * so disable TSO entirely if the kernel allows more fragments than that.
 */
#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
#undef NETIF_F_TSO
#endif

/* Sanity check: the low-priority Tx ring must be able to hold at least
 * two maximally-fragmented packets.
 */
#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
#error TX ring too small!
#endif
161
/* Static description of one board variant: marketing name plus a mask of
 * the TYPHOON_* capability flags defined below.
 */
struct typhoon_card_info {
	char *name;
	int capabilities;
};
166
/* Capability flag bits for typhoon_card_info.capabilities */
#define TYPHOON_CRYPTO_NONE		0x00
#define TYPHOON_CRYPTO_DES		0x01
#define TYPHOON_CRYPTO_3DES		0x02
#define TYPHOON_CRYPTO_VARIABLE		0x04
#define TYPHOON_FIBER			0x08
#define TYPHOON_WAKEUP_NEEDS_RESET	0x10

/* Board indexes -- must stay in sync with the typhoon_card_info[] array
 * below, and these values are used as driver_data in typhoon_pci_tbl.
 */
enum typhoon_cards {
	TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
	TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
	TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
	TYPHOON_FXM,
};
180
/* directly indexed by enum typhoon_cards, above -- the entry order here
 * must match the enumerator order exactly
 */
static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
	{ "3Com Typhoon (3C990-TX)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990-TX-95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990-TX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon (3C990SVR)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990SVR95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990SVR97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon2 (3C990B-TX-M)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon2 (3C990BSVR)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon (3CR990-FX-95)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-95 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon2 (3C990B-FX-97)",
		TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
};
210
211 /* Notes on the new subsystem numbering scheme:
212  * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
213  * bit 4 indicates if this card has secured firmware (we don't support it)
214  * bit 8 indicates if this is a (0) copper or (1) fiber card
215  * bits 12-16 indicate card type: (0) client and (1) server
216  */
217 static struct pci_device_id typhoon_pci_tbl[] = {
218         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
220         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
222         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
224         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
225           PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
226         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
227           PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
228         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
229           PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
230         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
231           PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
232         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
233           PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
234         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
235           PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
236         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
237           PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
238         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
240         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
241           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
242         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
243           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
244         { 0, }
245 };
246 MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
247
/* Define the shared memory area
 * Align everything the 3XP will normally be using.
 * We'll need to move/align txHi if we start using that ring.
 *
 * The struct is packed because its layout is part of the interface with
 * the 3XP -- no compiler padding may be inserted between members.
 */
#define __3xp_aligned	____cacheline_aligned
struct typhoon_shared {
	struct typhoon_interface	iface;
	struct typhoon_indexes		indexes			__3xp_aligned;
	struct tx_desc			txLo[TXLO_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxLo[RX_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxHi[RX_ENTRIES]	__3xp_aligned;
	struct cmd_desc			cmd[COMMAND_ENTRIES]	__3xp_aligned;
	struct resp_desc		resp[RESPONSE_ENTRIES]	__3xp_aligned;
	struct rx_free			rxBuff[RXFREE_ENTRIES]	__3xp_aligned;
	u32				zeroWord;
	/* unaligned -- must be moved/aligned if the Hi Tx ring gets used */
	struct tx_desc			txHi[TXHI_ENTRIES];
} __attribute__ ((packed));
265
/* Host-side bookkeeping for one receive buffer: the skb and the DMA
 * address it was mapped at (needed to unmap it on completion).
 */
struct rxbuff_ent {
	struct sk_buff *skb;
	dma_addr_t	dma_addr;
};
270
/* Per-adapter private state (netdev_priv). Members are grouped into
 * cache-line sections by the paths that touch them (Tx, Irq/Rx, general).
 */
struct typhoon {
	/* Tx cache line section */
	struct transmit_ring	txLoRing	____cacheline_aligned;
	struct pci_dev *	tx_pdev;
	void __iomem		*tx_ioaddr;
	u32			txlo_dma_addr;

	/* Irq/Rx cache line section */
	void __iomem		*ioaddr		____cacheline_aligned;
	struct typhoon_indexes *indexes;
	/* nonzero while typhoon_issue_command() is waiting on a response */
	u8			awaiting_resp;
	u8			duplex;
	u8			speed;
	/* Sleeping/Running -- see enum state_values below */
	u8			card_state;
	struct basic_ring	rxLoRing;
	struct pci_dev *	pdev;
	struct net_device *	dev;
	struct napi_struct	napi;
	spinlock_t		state_lock;
	struct vlan_group *	vlgrp;
	struct basic_ring	rxHiRing;
	struct basic_ring	rxBuffRing;
	struct rxbuff_ent	rxbuffers[RXENT_ENTRIES];

	/* general section */
	/* serializes access to the command/response rings */
	spinlock_t		command_lock	____cacheline_aligned;
	struct basic_ring	cmdRing;
	struct basic_ring	respRing;
	struct net_device_stats	stats;
	struct net_device_stats	stats_saved;
	const char *		name;
	struct typhoon_shared *	shared;
	dma_addr_t		shared_dma;
	__le16			xcvr_select;
	__le16			wol_events;
	/* TYPHOON_OFFLOAD_* bits currently enabled on the card */
	__le32			offload;

	/* unused stuff (future use) */
	int			capabilities;
	struct transmit_ring	txHiRing;
};
312
/* How callers of typhoon_reset() may wait for the card: not at all,
 * busy-waiting only (udelay), or allowed to sleep.
 */
enum completion_wait_values {
	NoWait = 0, WaitNoSleep, WaitSleep,
};

/* These are the values for the typhoon.card_state variable.
 * These determine where the statistics will come from in get_stats().
 * The sleep image does not support the statistics we need.
 */
enum state_values {
	Sleeping = 0, Running,
};
324
/* PCI writes are not guaranteed to be posted in order, but outstanding writes
 * cannot pass a read, so this forces current writes to post.
 * (Only meaningful for MMIO; port I/O is not posted.)
 */
#define typhoon_post_pci_writes(x) \
	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)

/* We'll wait up to six seconds for a reset, and half a second normally.
 */
#define TYPHOON_UDELAY			50
#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)

/* TSO helpers; when the kernel has no TSO support the stubs below compile
 * the feature away to zeroes.
 */
#if defined(NETIF_F_TSO)
#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
#define TSO_NUM_DESCRIPTORS	2
#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
#else
#define NETIF_F_TSO		0
#define skb_tso_size(x)		0
#define TSO_NUM_DESCRIPTORS	0
#define TSO_OFFLOAD_ON		0
#endif
348
349 static inline void
350 typhoon_inc_index(u32 *index, const int count, const int num_entries)
351 {
352         /* Increment a ring index -- we can use this for all rings execept
353          * the Rx rings, as they use different size descriptors
354          * otherwise, everything is the same size as a cmd_desc
355          */
356         *index += count * sizeof(struct cmd_desc);
357         *index %= num_entries * sizeof(struct cmd_desc);
358 }
359
360 static inline void
361 typhoon_inc_cmd_index(u32 *index, const int count)
362 {
363         typhoon_inc_index(index, count, COMMAND_ENTRIES);
364 }
365
366 static inline void
367 typhoon_inc_resp_index(u32 *index, const int count)
368 {
369         typhoon_inc_index(index, count, RESPONSE_ENTRIES);
370 }
371
372 static inline void
373 typhoon_inc_rxfree_index(u32 *index, const int count)
374 {
375         typhoon_inc_index(index, count, RXFREE_ENTRIES);
376 }
377
378 static inline void
379 typhoon_inc_tx_index(u32 *index, const int count)
380 {
381         /* if we start using the Hi Tx ring, this needs updateing */
382         typhoon_inc_index(index, count, TXLO_ENTRIES);
383 }
384
385 static inline void
386 typhoon_inc_rx_index(u32 *index, const int count)
387 {
388         /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
389         *index += count * sizeof(struct rx_desc);
390         *index %= RX_ENTRIES * sizeof(struct rx_desc);
391 }
392
static int
typhoon_reset(void __iomem *ioaddr, int wait_type)
{
	/* Soft-reset the 3XP and optionally wait for it to come back up.
	 *
	 * wait_type: NoWait      -- issue the reset, don't poll for status
	 *            WaitNoSleep -- busy-wait (udelay) for the card
	 *            WaitSleep   -- poll with schedule_timeout (may sleep)
	 *
	 * Returns 0 on success, or -ETIMEDOUT if the card never reports
	 * TYPHOON_STATUS_WAITING_FOR_HOST within the timeout.
	 */
	int i, err = 0;
	int timeout;

	if(wait_type == WaitNoSleep)
		timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
	else
		timeout = TYPHOON_RESET_TIMEOUT_SLEEP;

	/* mask and acknowledge all interrupts before resetting */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* pulse the reset line: assert, post the write, then deassert */
	iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
	typhoon_post_pci_writes(ioaddr);
	udelay(1);
	iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);

	if(wait_type != NoWait) {
		for(i = 0; i < timeout; i++) {
			if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
			   TYPHOON_STATUS_WAITING_FOR_HOST)
				goto out;

			if(wait_type == WaitSleep)
				schedule_timeout_uninterruptible(1);
			else
				udelay(TYPHOON_UDELAY);
		}

		err = -ETIMEDOUT;
	}

out:
	/* re-mask and re-ack interrupts after the reset */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* The 3XP seems to need a little extra time to complete the load
	 * of the sleep image before we can reliably boot it. Failure to
	 * do this occasionally results in a hung adapter after boot in
	 * typhoon_init_one() while trying to read the MAC address or
	 * putting the card to sleep. 3Com's driver waits 5ms, but
	 * that seems to be overkill. However, if we can sleep, we might
	 * as well give it that much time. Otherwise, we'll give it 500us,
	 * which should be enough (I've see it work well at 100us, but still
	 * saw occasional problems.)
	 */
	if(wait_type == WaitSleep)
		msleep(5);
	else
		udelay(500);
	return err;
}
447
448 static int
449 typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
450 {
451         int i, err = 0;
452
453         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
454                 if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
455                         goto out;
456                 udelay(TYPHOON_UDELAY);
457         }
458
459         err = -ETIMEDOUT;
460
461 out:
462         return err;
463 }
464
465 static inline void
466 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
467 {
468         if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
469                 netif_carrier_off(dev);
470         else
471                 netif_carrier_on(dev);
472 }
473
474 static inline void
475 typhoon_hello(struct typhoon *tp)
476 {
477         struct basic_ring *ring = &tp->cmdRing;
478         struct cmd_desc *cmd;
479
480         /* We only get a hello request if we've not sent anything to the
481          * card in a long while. If the lock is held, then we're in the
482          * process of issuing a command, so we don't need to respond.
483          */
484         if(spin_trylock(&tp->command_lock)) {
485                 cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
486                 typhoon_inc_cmd_index(&ring->lastWrite, 1);
487
488                 INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
489                 smp_wmb();
490                 iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
491                 spin_unlock(&tp->command_lock);
492         }
493 }
494
static int
typhoon_process_response(struct typhoon *tp, int resp_size,
				struct resp_desc *resp_save)
{
	/* Drain the response ring. Responses carrying a sequence number
	 * are copied into resp_save (which holds up to resp_size
	 * descriptors) for the command issuer; unsolicited media-status
	 * and hello responses are handled inline; anything else is dumped
	 * to the log.
	 *
	 * Returns nonzero once the awaited response has been captured
	 * (or if no capture was requested, i.e. resp_save was NULL).
	 */
	struct typhoon_indexes *indexes = tp->indexes;
	struct resp_desc *resp;
	u8 *base = tp->respRing.ringBase;
	int count, len, wrap_len;
	u32 cleared;
	u32 ready;

	cleared = le32_to_cpu(indexes->respCleared);
	ready = le32_to_cpu(indexes->respReady);
	while(cleared != ready) {
		resp = (struct resp_desc *)(base + cleared);
		/* numDesc counts the extra descriptors beyond the first */
		count = resp->numDesc + 1;
		if(resp_save && resp->seqNo) {
			if(count > resp_size) {
				/* caller's buffer is too small -- report an
				 * error, but still consume the descriptors */
				resp_save->flags = TYPHOON_RESP_ERROR;
				goto cleanup;
			}

			/* the response may wrap past the end of the ring;
			 * copy it out in up to two chunks */
			wrap_len = 0;
			len = count * sizeof(*resp);
			if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
				wrap_len = cleared + len - RESPONSE_RING_SIZE;
				len = RESPONSE_RING_SIZE - cleared;
			}

			memcpy(resp_save, resp, len);
			if(unlikely(wrap_len)) {
				resp_save += len / sizeof(*resp);
				memcpy(resp_save, base, wrap_len);
			}

			/* NULL doubles as the "captured" marker for the
			 * return value below */
			resp_save = NULL;
		} else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
			typhoon_media_status(tp->dev, resp);
		} else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
			typhoon_hello(tp);
		} else {
			printk(KERN_ERR "%s: dumping unexpected response "
			       "0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
			       tp->name, le16_to_cpu(resp->cmd),
			       resp->numDesc, resp->flags,
			       le16_to_cpu(resp->parm1),
			       le32_to_cpu(resp->parm2),
			       le32_to_cpu(resp->parm3));
		}

cleanup:
		typhoon_inc_resp_index(&cleared, count);
	}

	/* tell the card which responses we've consumed */
	indexes->respCleared = cpu_to_le32(cleared);
	wmb();
	return (resp_save == NULL);
}
553
554 static inline int
555 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
556 {
557         /* this works for all descriptors but rx_desc, as they are a
558          * different size than the cmd_desc -- everyone else is the same
559          */
560         lastWrite /= sizeof(struct cmd_desc);
561         lastRead /= sizeof(struct cmd_desc);
562         return (ringSize + lastRead - lastWrite - 1) % ringSize;
563 }
564
565 static inline int
566 typhoon_num_free_cmd(struct typhoon *tp)
567 {
568         int lastWrite = tp->cmdRing.lastWrite;
569         int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
570
571         return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
572 }
573
574 static inline int
575 typhoon_num_free_resp(struct typhoon *tp)
576 {
577         int respReady = le32_to_cpu(tp->indexes->respReady);
578         int respCleared = le32_to_cpu(tp->indexes->respCleared);
579
580         return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
581 }
582
583 static inline int
584 typhoon_num_free_tx(struct transmit_ring *ring)
585 {
586         /* if we start using the Hi Tx ring, this needs updating */
587         return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
588 }
589
590 static int
591 typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
592                       int num_resp, struct resp_desc *resp)
593 {
594         struct typhoon_indexes *indexes = tp->indexes;
595         struct basic_ring *ring = &tp->cmdRing;
596         struct resp_desc local_resp;
597         int i, err = 0;
598         int got_resp;
599         int freeCmd, freeResp;
600         int len, wrap_len;
601
602         spin_lock(&tp->command_lock);
603
604         freeCmd = typhoon_num_free_cmd(tp);
605         freeResp = typhoon_num_free_resp(tp);
606
607         if(freeCmd < num_cmd || freeResp < num_resp) {
608                 printk("%s: no descs for cmd, had (needed) %d (%d) cmd, "
609                         "%d (%d) resp\n", tp->name, freeCmd, num_cmd,
610                         freeResp, num_resp);
611                 err = -ENOMEM;
612                 goto out;
613         }
614
615         if(cmd->flags & TYPHOON_CMD_RESPOND) {
616                 /* If we're expecting a response, but the caller hasn't given
617                  * us a place to put it, we'll provide one.
618                  */
619                 tp->awaiting_resp = 1;
620                 if(resp == NULL) {
621                         resp = &local_resp;
622                         num_resp = 1;
623                 }
624         }
625
626         wrap_len = 0;
627         len = num_cmd * sizeof(*cmd);
628         if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
629                 wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
630                 len = COMMAND_RING_SIZE - ring->lastWrite;
631         }
632
633         memcpy(ring->ringBase + ring->lastWrite, cmd, len);
634         if(unlikely(wrap_len)) {
635                 struct cmd_desc *wrap_ptr = cmd;
636                 wrap_ptr += len / sizeof(*cmd);
637                 memcpy(ring->ringBase, wrap_ptr, wrap_len);
638         }
639
640         typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
641
642         /* "I feel a presence... another warrior is on the mesa."
643          */
644         wmb();
645         iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
646         typhoon_post_pci_writes(tp->ioaddr);
647
648         if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
649                 goto out;
650
651         /* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
652          * preempt or do anything other than take interrupts. So, don't
653          * wait for a response unless you have to.
654          *
655          * I've thought about trying to sleep here, but we're called
656          * from many contexts that don't allow that. Also, given the way
657          * 3Com has implemented irq coalescing, we would likely timeout --
658          * this has been observed in real life!
659          *
660          * The big killer is we have to wait to get stats from the card,
661          * though we could go to a periodic refresh of those if we don't
662          * mind them getting somewhat stale. The rest of the waiting
663          * commands occur during open/close/suspend/resume, so they aren't
664          * time critical. Creating SAs in the future will also have to
665          * wait here.
666          */
667         got_resp = 0;
668         for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
669                 if(indexes->respCleared != indexes->respReady)
670                         got_resp = typhoon_process_response(tp, num_resp,
671                                                                 resp);
672                 udelay(TYPHOON_UDELAY);
673         }
674
675         if(!got_resp) {
676                 err = -ETIMEDOUT;
677                 goto out;
678         }
679
680         /* Collect the error response even if we don't care about the
681          * rest of the response
682          */
683         if(resp->flags & TYPHOON_RESP_ERROR)
684                 err = -EIO;
685
686 out:
687         if(tp->awaiting_resp) {
688                 tp->awaiting_resp = 0;
689                 smp_wmb();
690
691                 /* Ugh. If a response was added to the ring between
692                  * the call to typhoon_process_response() and the clearing
693                  * of tp->awaiting_resp, we could have missed the interrupt
694                  * and it could hang in the ring an indeterminate amount of
695                  * time. So, check for it, and interrupt ourselves if this
696                  * is the case.
697                  */
698                 if(indexes->respCleared != indexes->respReady)
699                         iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
700         }
701
702         spin_unlock(&tp->command_lock);
703         return err;
704 }
705
706 static void
707 typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
708 {
709         struct typhoon *tp = netdev_priv(dev);
710         struct cmd_desc xp_cmd;
711         int err;
712
713         spin_lock_bh(&tp->state_lock);
714         if(!tp->vlgrp != !grp) {
715                 /* We've either been turned on for the first time, or we've
716                  * been turned off. Update the 3XP.
717                  */
718                 if(grp)
719                         tp->offload |= TYPHOON_OFFLOAD_VLAN;
720                 else
721                         tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
722
723                 /* If the interface is up, the runtime is running -- and we
724                  * must be up for the vlan core to call us.
725                  *
726                  * Do the command outside of the spin lock, as it is slow.
727                  */
728                 INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
729                                         TYPHOON_CMD_SET_OFFLOAD_TASKS);
730                 xp_cmd.parm2 = tp->offload;
731                 xp_cmd.parm3 = tp->offload;
732                 spin_unlock_bh(&tp->state_lock);
733                 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
734                 if(err < 0)
735                         printk("%s: vlan offload error %d\n", tp->name, -err);
736                 spin_lock_bh(&tp->state_lock);
737         }
738
739         /* now make the change visible */
740         tp->vlgrp = grp;
741         spin_unlock_bh(&tp->state_lock);
742 }
743
744 static inline void
745 typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
746                         u32 ring_dma)
747 {
748         struct tcpopt_desc *tcpd;
749         u32 tcpd_offset = ring_dma;
750
751         tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
752         tcpd_offset += txRing->lastWrite;
753         tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
754         typhoon_inc_tx_index(&txRing->lastWrite, 1);
755
756         tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
757         tcpd->numDesc = 1;
758         tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
759         tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
760         tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
761         tcpd->bytesTx = cpu_to_le32(skb->len);
762         tcpd->status = 0;
763 }
764
/* Queue one sk_buff for transmission on the low-priority Tx ring.
 *
 * Builds a packet descriptor, an optional TSO option descriptor, and
 * one fragment descriptor per data area (linear head plus each page
 * fragment), then kicks the 3XP by writing the new ring index. Always
 * returns 0; if the ring is nearly full afterwards, the netdev queue
 * is stopped until Tx completion frees space.
 */
static int
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct transmit_ring *txRing;
	struct tx_desc *txd, *first_txd;
	dma_addr_t skb_dma;
	int numDesc;

	/* we have two rings to choose from, but we only use txLo for now
	 * If we start using the Hi ring as well, we'll need to update
	 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
	 * and TXHI_ENTRIES to match, as well as update the TSO code below
	 * to get the right DMA address
	 */
	txRing = &tp->txLoRing;

	/* We need one descriptor for each fragment of the sk_buff, plus the
	 * one for the ->data area of it.
	 *
	 * The docs say a maximum of 16 fragment descriptors per TCP option
	 * descriptor, then make a new packet descriptor and option descriptor
	 * for the next 16 fragments. The engineers say just an option
	 * descriptor is needed. I've tested up to 26 fragments with a single
	 * packet descriptor/option descriptor combo, so I use that for now.
	 *
	 * If problems develop with TSO, check this first.
	 */
	numDesc = skb_shinfo(skb)->nr_frags + 1;
	if (skb_is_gso(skb))
		numDesc++;

	/* When checking for free space in the ring, we need to also
	 * account for the initial Tx descriptor, and we always must leave
	 * at least one descriptor unused in the ring so that it doesn't
	 * wrap and look empty.
	 *
	 * The only time we should loop here is when we hit the race
	 * between marking the queue awake and updating the cleared index.
	 * Just loop and it will appear. This comes from the acenic driver.
	 */
	while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
		smp_rmb();

	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
	first_txd->numDesc = 0;
	first_txd->len = 0;
	/* Stash the skb pointer in the (host-only) tx_addr field so
	 * typhoon_clean_tx() can free it at completion time.
	 */
	first_txd->tx_addr = (u64)((unsigned long) skb);
	first_txd->processFlags = 0;

	if(skb->ip_summed == CHECKSUM_PARTIAL) {
		/* The 3XP will figure out if this is UDP/TCP */
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
	}

	if(vlan_tx_tag_present(skb)) {
		first_txd->processFlags |=
		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
		first_txd->processFlags |=
		    cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
	}

	if (skb_is_gso(skb)) {
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
		first_txd->numDesc++;

		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
	}

	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* No need to worry about padding packet -- the firmware pads
	 * it with zeros to ETH_ZLEN for us.
	 */
	if(skb_shinfo(skb)->nr_frags == 0) {
		/* Linear skb: a single fragment descriptor covers it all. */
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(skb->len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;
	} else {
		int i, len;

		/* Map the linear head first, then one descriptor per
		 * paged fragment.
		 */
		len = skb_headlen(skb);
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
		                         PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;

		for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			void *frag_addr;

			txd = (struct tx_desc *) (txRing->ringBase +
						txRing->lastWrite);
			typhoon_inc_tx_index(&txRing->lastWrite, 1);

			len = frag->size;
			frag_addr = (void *) page_address(frag->page) +
						frag->page_offset;
			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
					 PCI_DMA_TODEVICE);
			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
			txd->len = cpu_to_le16(len);
			txd->frag.addr = cpu_to_le32(skb_dma);
			txd->frag.addrHi = 0;
			first_txd->numDesc++;
		}
	}

	/* Kick the 3XP
	 */
	wmb();
	iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);

	dev->trans_start = jiffies;

	/* If we don't have room to put the worst case packet on the
	 * queue, then we must stop the queue. We need 2 extra
	 * descriptors -- one to prevent ring wrap, and one for the
	 * Tx header.
	 */
	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;

	if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
		netif_stop_queue(dev);

		/* A Tx complete IRQ could have gotten inbetween, making
		 * the ring free again. Only need to recheck here, since
		 * Tx is serialized.
		 */
		if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
			netif_wake_queue(dev);
	}

	return 0;
}
914
/* Program the 3XP's Rx filter to match the netdev's current flags
 * and multicast list.
 *
 * Builds a 64-bit multicast hash from ether_crc() when there are few
 * enough multicast entries; otherwise falls back to all-multicast.
 * Issues SET_MULTICAST_HASH (when used) and SET_RX_FILTER commands
 * without waiting for a response.
 */
static void
typhoon_set_rx_mode(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;
	u32 mc_filter[2];
	__le16 filter;

	filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(dev->flags & IFF_PROMISC) {
		filter |= TYPHOON_RX_FILTER_PROMISCOUS;
	} else if((dev->mc_count > multicast_filter_limit) ||
		  (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		filter |= TYPHOON_RX_FILTER_ALL_MCAST;
	} else if(dev->mc_count) {
		struct dev_mc_list *mclist;
		int i;

		/* Fold each multicast address into a 64-bit hash filter;
		 * the low 6 CRC bits select the bit to set.
		 */
		memset(mc_filter, 0, sizeof(mc_filter));
		for(i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		    i++, mclist = mclist->next) {
			int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
			mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
		}

		INIT_COMMAND_NO_RESPONSE(&xp_cmd,
					 TYPHOON_CMD_SET_MULTICAST_HASH);
		xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
		xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
		xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
		typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

		filter |= TYPHOON_RX_FILTER_MCAST_HASH;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = filter;
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
}
955
956 static int
957 typhoon_do_get_stats(struct typhoon *tp)
958 {
959         struct net_device_stats *stats = &tp->stats;
960         struct net_device_stats *saved = &tp->stats_saved;
961         struct cmd_desc xp_cmd;
962         struct resp_desc xp_resp[7];
963         struct stats_resp *s = (struct stats_resp *) xp_resp;
964         int err;
965
966         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
967         err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
968         if(err < 0)
969                 return err;
970
971         /* 3Com's Linux driver uses txMultipleCollisions as it's
972          * collisions value, but there is some other collision info as well...
973          *
974          * The extra status reported would be a good candidate for
975          * ethtool_ops->get_{strings,stats}()
976          */
977         stats->tx_packets = le32_to_cpu(s->txPackets);
978         stats->tx_bytes = le64_to_cpu(s->txBytes);
979         stats->tx_errors = le32_to_cpu(s->txCarrierLost);
980         stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
981         stats->collisions = le32_to_cpu(s->txMultipleCollisions);
982         stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
983         stats->rx_bytes = le64_to_cpu(s->rxBytesGood);
984         stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
985         stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
986                         le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
987         stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
988         stats->rx_length_errors = le32_to_cpu(s->rxOversized);
989         tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
990                         SPEED_100 : SPEED_10;
991         tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
992                         DUPLEX_FULL : DUPLEX_HALF;
993
994         /* add in the saved statistics
995          */
996         stats->tx_packets += saved->tx_packets;
997         stats->tx_bytes += saved->tx_bytes;
998         stats->tx_errors += saved->tx_errors;
999         stats->collisions += saved->collisions;
1000         stats->rx_packets += saved->rx_packets;
1001         stats->rx_bytes += saved->rx_bytes;
1002         stats->rx_fifo_errors += saved->rx_fifo_errors;
1003         stats->rx_errors += saved->rx_errors;
1004         stats->rx_crc_errors += saved->rx_crc_errors;
1005         stats->rx_length_errors += saved->rx_length_errors;
1006
1007         return 0;
1008 }
1009
1010 static struct net_device_stats *
1011 typhoon_get_stats(struct net_device *dev)
1012 {
1013         struct typhoon *tp = netdev_priv(dev);
1014         struct net_device_stats *stats = &tp->stats;
1015         struct net_device_stats *saved = &tp->stats_saved;
1016
1017         smp_rmb();
1018         if(tp->card_state == Sleeping)
1019                 return saved;
1020
1021         if(typhoon_do_get_stats(tp) < 0) {
1022                 printk(KERN_ERR "%s: error getting stats\n", dev->name);
1023                 return saved;
1024         }
1025
1026         return stats;
1027 }
1028
1029 static int
1030 typhoon_set_mac_address(struct net_device *dev, void *addr)
1031 {
1032         struct sockaddr *saddr = (struct sockaddr *) addr;
1033
1034         if(netif_running(dev))
1035                 return -EBUSY;
1036
1037         memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
1038         return 0;
1039 }
1040
1041 static void
1042 typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1043 {
1044         struct typhoon *tp = netdev_priv(dev);
1045         struct pci_dev *pci_dev = tp->pdev;
1046         struct cmd_desc xp_cmd;
1047         struct resp_desc xp_resp[3];
1048
1049         smp_rmb();
1050         if(tp->card_state == Sleeping) {
1051                 strcpy(info->fw_version, "Sleep image");
1052         } else {
1053                 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1054                 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1055                         strcpy(info->fw_version, "Unknown runtime");
1056                 } else {
1057                         u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
1058                         snprintf(info->fw_version, 32, "%02x.%03x.%03x",
1059                                  sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
1060                                  sleep_ver & 0xfff);
1061                 }
1062         }
1063
1064         strcpy(info->driver, DRV_MODULE_NAME);
1065         strcpy(info->version, DRV_MODULE_VERSION);
1066         strcpy(info->bus_info, pci_name(pci_dev));
1067 }
1068
1069 static int
1070 typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1071 {
1072         struct typhoon *tp = netdev_priv(dev);
1073
1074         cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1075                                 SUPPORTED_Autoneg;
1076
1077         switch (tp->xcvr_select) {
1078         case TYPHOON_XCVR_10HALF:
1079                 cmd->advertising = ADVERTISED_10baseT_Half;
1080                 break;
1081         case TYPHOON_XCVR_10FULL:
1082                 cmd->advertising = ADVERTISED_10baseT_Full;
1083                 break;
1084         case TYPHOON_XCVR_100HALF:
1085                 cmd->advertising = ADVERTISED_100baseT_Half;
1086                 break;
1087         case TYPHOON_XCVR_100FULL:
1088                 cmd->advertising = ADVERTISED_100baseT_Full;
1089                 break;
1090         case TYPHOON_XCVR_AUTONEG:
1091                 cmd->advertising = ADVERTISED_10baseT_Half |
1092                                             ADVERTISED_10baseT_Full |
1093                                             ADVERTISED_100baseT_Half |
1094                                             ADVERTISED_100baseT_Full |
1095                                             ADVERTISED_Autoneg;
1096                 break;
1097         }
1098
1099         if(tp->capabilities & TYPHOON_FIBER) {
1100                 cmd->supported |= SUPPORTED_FIBRE;
1101                 cmd->advertising |= ADVERTISED_FIBRE;
1102                 cmd->port = PORT_FIBRE;
1103         } else {
1104                 cmd->supported |= SUPPORTED_10baseT_Half |
1105                                         SUPPORTED_10baseT_Full |
1106                                         SUPPORTED_TP;
1107                 cmd->advertising |= ADVERTISED_TP;
1108                 cmd->port = PORT_TP;
1109         }
1110
1111         /* need to get stats to make these link speed/duplex valid */
1112         typhoon_do_get_stats(tp);
1113         cmd->speed = tp->speed;
1114         cmd->duplex = tp->duplex;
1115         cmd->phy_address = 0;
1116         cmd->transceiver = XCVR_INTERNAL;
1117         if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
1118                 cmd->autoneg = AUTONEG_ENABLE;
1119         else
1120                 cmd->autoneg = AUTONEG_DISABLE;
1121         cmd->maxtxpkt = 1;
1122         cmd->maxrxpkt = 1;
1123
1124         return 0;
1125 }
1126
1127 static int
1128 typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1129 {
1130         struct typhoon *tp = netdev_priv(dev);
1131         struct cmd_desc xp_cmd;
1132         __le16 xcvr;
1133         int err;
1134
1135         err = -EINVAL;
1136         if(cmd->autoneg == AUTONEG_ENABLE) {
1137                 xcvr = TYPHOON_XCVR_AUTONEG;
1138         } else {
1139                 if(cmd->duplex == DUPLEX_HALF) {
1140                         if(cmd->speed == SPEED_10)
1141                                 xcvr = TYPHOON_XCVR_10HALF;
1142                         else if(cmd->speed == SPEED_100)
1143                                 xcvr = TYPHOON_XCVR_100HALF;
1144                         else
1145                                 goto out;
1146                 } else if(cmd->duplex == DUPLEX_FULL) {
1147                         if(cmd->speed == SPEED_10)
1148                                 xcvr = TYPHOON_XCVR_10FULL;
1149                         else if(cmd->speed == SPEED_100)
1150                                 xcvr = TYPHOON_XCVR_100FULL;
1151                         else
1152                                 goto out;
1153                 } else
1154                         goto out;
1155         }
1156
1157         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1158         xp_cmd.parm1 = xcvr;
1159         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1160         if(err < 0)
1161                 goto out;
1162
1163         tp->xcvr_select = xcvr;
1164         if(cmd->autoneg == AUTONEG_ENABLE) {
1165                 tp->speed = 0xff;       /* invalid */
1166                 tp->duplex = 0xff;      /* invalid */
1167         } else {
1168                 tp->speed = cmd->speed;
1169                 tp->duplex = cmd->duplex;
1170         }
1171
1172 out:
1173         return err;
1174 }
1175
1176 static void
1177 typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1178 {
1179         struct typhoon *tp = netdev_priv(dev);
1180
1181         wol->supported = WAKE_PHY | WAKE_MAGIC;
1182         wol->wolopts = 0;
1183         if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1184                 wol->wolopts |= WAKE_PHY;
1185         if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1186                 wol->wolopts |= WAKE_MAGIC;
1187         memset(&wol->sopass, 0, sizeof(wol->sopass));
1188 }
1189
1190 static int
1191 typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1192 {
1193         struct typhoon *tp = netdev_priv(dev);
1194
1195         if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1196                 return -EINVAL;
1197
1198         tp->wol_events = 0;
1199         if(wol->wolopts & WAKE_PHY)
1200                 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1201         if(wol->wolopts & WAKE_MAGIC)
1202                 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1203
1204         return 0;
1205 }
1206
1207 static u32
1208 typhoon_get_rx_csum(struct net_device *dev)
1209 {
1210         /* For now, we don't allow turning off RX checksums.
1211          */
1212         return 1;
1213 }
1214
1215 static void
1216 typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1217 {
1218         ering->rx_max_pending = RXENT_ENTRIES;
1219         ering->rx_mini_max_pending = 0;
1220         ering->rx_jumbo_max_pending = 0;
1221         ering->tx_max_pending = TXLO_ENTRIES - 1;
1222
1223         ering->rx_pending = RXENT_ENTRIES;
1224         ering->rx_mini_pending = 0;
1225         ering->rx_jumbo_pending = 0;
1226         ering->tx_pending = TXLO_ENTRIES - 1;
1227 }
1228
/* ethtool operations: get/set link settings and WoL are implemented
 * above; tx csum, sg, and tso toggling use the generic helpers.
 */
static const struct ethtool_ops typhoon_ethtool_ops = {
	.get_settings		= typhoon_get_settings,
	.set_settings		= typhoon_set_settings,
	.get_drvinfo		= typhoon_get_drvinfo,
	.get_wol		= typhoon_get_wol,
	.set_wol		= typhoon_set_wol,
	.get_link		= ethtool_op_get_link,
	.get_rx_csum		= typhoon_get_rx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= ethtool_op_set_tso,
	.get_ringparam		= typhoon_get_ringparam,
};
1242
/* Busy-wait for the 3XP to raise the boot-command interrupt.
 *
 * Polls the interrupt status register for up to TYPHOON_WAIT_TIMEOUT
 * iterations of TYPHOON_UDELAY microseconds. The interrupt is acked
 * on both the success and the timeout path. Returns 0 on success,
 * -ETIMEDOUT otherwise. Used only during firmware download.
 */
static int
typhoon_wait_interrupt(void __iomem *ioaddr)
{
	int i, err = 0;

	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
		   TYPHOON_INTR_BOOTCMD)
			goto out;
		udelay(TYPHOON_UDELAY);
	}

	err = -ETIMEDOUT;

out:
	/* ack the interrupt regardless of whether it arrived */
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	return err;
}
1261
/* Bus-address offset of member x within the shared host/NIC area. */
#define shared_offset(x)	offsetof(struct typhoon_shared, x)

/* Initialize the host/NIC shared interface structure.
 *
 * Zeroes the shared area, then fills in the bus addresses and sizes
 * of every ring (Tx lo/hi, Rx lo/hi, Rx free-buffer, command,
 * response) plus the index block and the zero word, and sets up the
 * host-side ring bookkeeping in @tp. The card starts out Sleeping;
 * the smp_wmb() publishes that state for the smp_rmb() readers
 * (stats/drvinfo paths).
 */
static void
typhoon_init_interface(struct typhoon *tp)
{
	struct typhoon_interface *iface = &tp->shared->iface;
	dma_addr_t shared_dma;

	memset(tp->shared, 0, sizeof(struct typhoon_shared));

	/* The *Hi members of iface are all init'd to zero by the memset().
	 */
	shared_dma = tp->shared_dma + shared_offset(indexes);
	iface->ringIndex = cpu_to_le32(shared_dma);

	shared_dma = tp->shared_dma + shared_offset(txLo);
	iface->txLoAddr = cpu_to_le32(shared_dma);
	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(txHi);
	iface->txHiAddr = cpu_to_le32(shared_dma);
	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxBuff);
	iface->rxBuffAddr = cpu_to_le32(shared_dma);
	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
					sizeof(struct rx_free));

	shared_dma = tp->shared_dma + shared_offset(rxLo);
	iface->rxLoAddr = cpu_to_le32(shared_dma);
	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxHi);
	iface->rxHiAddr = cpu_to_le32(shared_dma);
	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(cmd);
	iface->cmdAddr = cpu_to_le32(shared_dma);
	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(resp);
	iface->respAddr = cpu_to_le32(shared_dma);
	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(zeroWord);
	iface->zeroAddr = cpu_to_le32(shared_dma);

	/* host-side views of the same rings */
	tp->indexes = &tp->shared->indexes;
	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
	tp->respRing.ringBase = (u8 *) tp->shared->resp;

	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;

	/* cached for typhoon_tso_fill()'s response-address computation */
	tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
	tp->card_state = Sleeping;
	smp_wmb();

	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;

	spin_lock_init(&tp->command_lock);
	spin_lock_init(&tp->state_lock);
}
1331
1332 static void
1333 typhoon_init_rings(struct typhoon *tp)
1334 {
1335         memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1336
1337         tp->txLoRing.lastWrite = 0;
1338         tp->txHiRing.lastWrite = 0;
1339         tp->rxLoRing.lastWrite = 0;
1340         tp->rxHiRing.lastWrite = 0;
1341         tp->rxBuffRing.lastWrite = 0;
1342         tp->cmdRing.lastWrite = 0;
1343         tp->cmdRing.lastWrite = 0;
1344
1345         tp->txLoRing.lastRead = 0;
1346         tp->txHiRing.lastRead = 0;
1347 }
1348
/* Firmware image cache, shared by all typhoon devices: loaded once on
 * first use. typhoon_fw_image is a kmalloc'd copy of the (read-only)
 * request_firmware() data so it can be DMA-mapped for download.
 */
static const struct firmware *typhoon_fw;
static u8 *typhoon_fw_image;
1351
/* Load and cache the runtime firmware image.
 *
 * A no-op if the image is already cached in typhoon_fw. Otherwise
 * fetches FIRMWARE_NAME via request_firmware(), sanity-checks the
 * header size and "TYPHOON" magic (8 bytes, including the NUL), and
 * copies the data into kmalloc'd memory (typhoon_fw_image) so that
 * typhoon_download_firmware() can DMA-map it. Returns 0 on success
 * or a negative errno; on failure the firmware reference is dropped.
 */
static int
typhoon_request_firmware(struct typhoon *tp)
{
	int err;

	if (typhoon_fw)
		return 0;

	err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
	if (err) {
		printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
		       tp->name, FIRMWARE_NAME);
		return err;
	}

	if (typhoon_fw->size < sizeof(struct typhoon_file_header) ||
	    memcmp(typhoon_fw->data, "TYPHOON", 8)) {
		printk(KERN_ERR "%s: Invalid firmware image\n",
		       tp->name);
		err = -EINVAL;
		goto out_err;
	}

	typhoon_fw_image = kmalloc(typhoon_fw->size, GFP_KERNEL);
	if (!typhoon_fw_image) {
		err = -ENOMEM;
		goto out_err;
	}
	memcpy(typhoon_fw_image, typhoon_fw->data, typhoon_fw->size);

	return 0;

out_err:
	release_firmware(typhoon_fw);
	typhoon_fw = NULL;
	return err;
}
1389
/* Download the runtime firmware image into the 3XP.
 *
 * DMA-maps the cached image (typhoon_fw_image), then walks the boot
 * handshake: enable/unmask the boot-command interrupt, wait for
 * WAITING_FOR_HOST, program the start address and HMAC digest words,
 * and hand each section to the card (length, IPv4-style checksum,
 * destination, source DMA address) after it signals readiness.
 * Finishes with DNLD_COMPLETE and waits for WAITING_FOR_BOOT.
 *
 * The exact register write order matters; typhoon_post_pci_writes()
 * flushes posted writes before each command. Returns 0 on success or
 * a negative errno. Interrupt masks are restored on all paths.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	void __iomem *ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	const struct typhoon_file_header *fHdr;
	const struct typhoon_section_header *sHdr;
	const u8 *image_data;
	dma_addr_t image_dma;
	__sum16 csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	image_data = typhoon_fw_image;
	fHdr = (struct typhoon_file_header *) image_data;

	err = -ENOMEM;
	image_dma = pci_map_single(pdev, (u8 *) image_data,
				   typhoon_fw->size, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, image_dma)) {
		printk(KERN_ERR "%s: no DMA mem for firmware\n", tp->name);
		goto err_out;
	}

	/* temporarily enable and unmask the boot-command interrupt */
	irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: card ready timeout\n", tp->name);
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* program the image start address and the five HMAC digest words */
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* The ioread32() in typhoon_wait_interrupt() will force the
	 * last write to the command register to post, so
	 * we don't need a typhoon_post_pci_writes() after it.
	 */
	for(i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		if (typhoon_wait_interrupt(ioaddr) < 0 ||
		    ioread32(ioaddr + TYPHOON_REG_STATUS) !=
		    TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
			printk(KERN_ERR "%s: segment ready timeout\n",
			       tp->name);
			goto err_out_irq;
		}

		/* Do an pseudo IPv4 checksum on the data -- first
		 * need to convert each u16 to cpu order before
		 * summing. Fortunately, due to the properties of
		 * the checksum, we can do this once, at the end.
		 */
		csum = csum_fold(csum_partial(image_data, section_len, 0));

		/* card pulls the section directly out of the DMA mapping */
		iowrite32(section_len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
		iowrite32(le16_to_cpu((__force __le16)csum),
			  ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
		iowrite32(load_addr,
			  ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
		iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
		iowrite32(image_dma + (image_data - typhoon_fw_image),
			  ioaddr + TYPHOON_REG_BOOT_DATA_LO);
		typhoon_post_pci_writes(ioaddr);
		iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
			  ioaddr + TYPHOON_REG_COMMAND);

		image_data += section_len;
	}

	if(typhoon_wait_interrupt(ioaddr) < 0 ||
	   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		printk(KERN_ERR "%s: final segment ready timeout\n", tp->name);
		goto err_out_irq;
	}

	iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		printk(KERN_ERR "%s: boot ready timeout, status 0x%0x\n",
		       tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* restore the interrupt mask/enable state we found on entry */
	iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_unmap_single(pdev, image_dma,  typhoon_fw->size, PCI_DMA_TODEVICE);

err_out:
	return err;
}
1519
/* Boot the downloaded runtime image on the 3XP.
 *
 * @initial_status: the status register value to wait for before
 *	booting (e.g. WAITING_FOR_BOOT)
 *
 * Hands the card the bus address of the shared boot record, waits for
 * the RUNNING status, clears the Tx/command ready registers, and
 * issues the final BOOT command. Returns 0 on success, -ETIMEDOUT on
 * either status wait failing.
 */
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
	void __iomem *ioaddr = tp->ioaddr;

	if(typhoon_wait_status(ioaddr, initial_status) < 0) {
		printk(KERN_ERR "%s: boot ready timeout\n", tp->name);
		goto out_timeout;
	}

	iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
	iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
				ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
		printk(KERN_ERR "%s: boot finish timeout (status 0x%x)\n",
		       tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto out_timeout;
	}

	/* Clear the Transmit and Command ready registers
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);

	return 0;

out_timeout:
	return -ETIMEDOUT;
}
1555
1556 static u32
1557 typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
1558                         volatile __le32 * index)
1559 {
1560         u32 lastRead = txRing->lastRead;
1561         struct tx_desc *tx;
1562         dma_addr_t skb_dma;
1563         int dma_len;
1564         int type;
1565
1566         while(lastRead != le32_to_cpu(*index)) {
1567                 tx = (struct tx_desc *) (txRing->ringBase + lastRead);
1568                 type = tx->flags & TYPHOON_TYPE_MASK;
1569
1570                 if(type == TYPHOON_TX_DESC) {
1571                         /* This tx_desc describes a packet.
1572                          */
1573                         unsigned long ptr = tx->tx_addr;
1574                         struct sk_buff *skb = (struct sk_buff *) ptr;
1575                         dev_kfree_skb_irq(skb);
1576                 } else if(type == TYPHOON_FRAG_DESC) {
1577                         /* This tx_desc describes a memory mapping. Free it.
1578                          */
1579                         skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
1580                         dma_len = le16_to_cpu(tx->len);
1581                         pci_unmap_single(tp->pdev, skb_dma, dma_len,
1582                                        PCI_DMA_TODEVICE);
1583                 }
1584
1585                 tx->flags = 0;
1586                 typhoon_inc_tx_index(&lastRead, 1);
1587         }
1588
1589         return lastRead;
1590 }
1591
1592 static void
1593 typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
1594                         volatile __le32 * index)
1595 {
1596         u32 lastRead;
1597         int numDesc = MAX_SKB_FRAGS + 1;
1598
1599         /* This will need changing if we start to use the Hi Tx ring. */
1600         lastRead = typhoon_clean_tx(tp, txRing, index);
1601         if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1602                                 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1603                 netif_wake_queue(tp->dev);
1604
1605         txRing->lastRead = lastRead;
1606         smp_wmb();
1607 }
1608
1609 static void
1610 typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
1611 {
1612         struct typhoon_indexes *indexes = tp->indexes;
1613         struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1614         struct basic_ring *ring = &tp->rxBuffRing;
1615         struct rx_free *r;
1616
1617         if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
1618                                 le32_to_cpu(indexes->rxBuffCleared)) {
1619                 /* no room in ring, just drop the skb
1620                  */
1621                 dev_kfree_skb_any(rxb->skb);
1622                 rxb->skb = NULL;
1623                 return;
1624         }
1625
1626         r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1627         typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1628         r->virtAddr = idx;
1629         r->physAddr = cpu_to_le32(rxb->dma_addr);
1630
1631         /* Tell the card about it */
1632         wmb();
1633         indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
1634 }
1635
/* Allocate a fresh PKT_BUF_SZ skb, map it for DMA, and post it to the
 * card on the rx free ring slot @idx.
 *
 * Returns 0 on success, or -ENOMEM if the free ring is full or the
 * skb allocation fails. On failure rxb->skb is left NULL so
 * typhoon_fill_free_ring() can retry later.
 */
static int
typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
{
        struct typhoon_indexes *indexes = tp->indexes;
        struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
        struct basic_ring *ring = &tp->rxBuffRing;
        struct rx_free *r;
        struct sk_buff *skb;
        dma_addr_t dma_addr;

        rxb->skb = NULL;

        /* If advancing lastWrite would collide with the card's cleared
         * index, there is no room to post another buffer.
         */
        if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
                                le32_to_cpu(indexes->rxBuffCleared))
                return -ENOMEM;

        skb = dev_alloc_skb(PKT_BUF_SZ);
        if(!skb)
                return -ENOMEM;

#if 0
        /* Please, 3com, fix the firmware to allow DMA to a unaligned
         * address! Pretty please?
         */
        skb_reserve(skb, 2);
#endif

        skb->dev = tp->dev;
        dma_addr = pci_map_single(tp->pdev, skb->data,
                                  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

        /* Since no card does 64 bit DAC, the high bits will never
         * change from zero.
         */
        r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
        typhoon_inc_rxfree_index(&ring->lastWrite, 1);
        r->virtAddr = idx;
        r->physAddr = cpu_to_le32(dma_addr);
        rxb->skb = skb;
        rxb->dma_addr = dma_addr;

        /* Tell the card about it */
        wmb();
        indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
        return 0;
}
1682
/* Pull received frames off an rx ring and feed them to the stack.
 *
 * @ready/@cleared point into the shared indexes page for this ring;
 * entries from *cleared up to *ready are consumed, at most @budget of
 * them, and the new cleared index is written back at the end.
 *
 * Frames smaller than rx_copybreak are copied into a fresh skb so the
 * original (still-mapped) buffer can be recycled to the card; larger
 * frames are handed up directly and a replacement buffer is posted.
 *
 * Returns the number of frames passed to the network stack.
 */
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
           volatile __le32 * cleared, int budget)
{
        struct rx_desc *rx;
        struct sk_buff *skb, *new_skb;
        struct rxbuff_ent *rxb;
        dma_addr_t dma_addr;
        u32 local_ready;
        u32 rxaddr;
        int pkt_len;
        u32 idx;
        __le32 csum_bits;
        int received;

        received = 0;
        local_ready = le32_to_cpu(*ready);
        rxaddr = le32_to_cpu(*cleared);
        while(rxaddr != local_ready && budget > 0) {
                rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
                idx = rx->addr;
                rxb = &tp->rxbuffers[idx];
                skb = rxb->skb;
                dma_addr = rxb->dma_addr;

                typhoon_inc_rx_index(&rxaddr, 1);

                /* Errored frames are not counted against the budget;
                 * just give the buffer straight back to the card.
                 */
                if(rx->flags & TYPHOON_RX_ERROR) {
                        typhoon_recycle_rx_skb(tp, idx);
                        continue;
                }

                pkt_len = le16_to_cpu(rx->frameLen);

                if(pkt_len < rx_copybreak &&
                   (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                        /* Copybreak path: the 2 byte reserve aligns the
                         * IP header in the copy; sync the mapped buffer
                         * around the CPU access, then recycle it.
                         */
                        skb_reserve(new_skb, 2);
                        pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
                                                    PKT_BUF_SZ,
                                                    PCI_DMA_FROMDEVICE);
                        skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
                        pci_dma_sync_single_for_device(tp->pdev, dma_addr,
                                                       PKT_BUF_SZ,
                                                       PCI_DMA_FROMDEVICE);
                        skb_put(new_skb, pkt_len);
                        typhoon_recycle_rx_skb(tp, idx);
                } else {
                        /* Hand the mapped buffer itself up the stack and
                         * post a replacement into its ring slot.
                         */
                        new_skb = skb;
                        skb_put(new_skb, pkt_len);
                        pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
                                       PCI_DMA_FROMDEVICE);
                        typhoon_alloc_rx_skb(tp, idx);
                }
                new_skb->protocol = eth_type_trans(new_skb, tp->dev);
                /* Trust the hardware checksum only when both the IP
                 * header and the TCP/UDP checksum were flagged good.
                 */
                csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
                        TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
                if(csum_bits ==
                   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD)
                   || csum_bits ==
                   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
                        new_skb->ip_summed = CHECKSUM_UNNECESSARY;
                } else
                        new_skb->ip_summed = CHECKSUM_NONE;

                /* state_lock guards tp->vlgrp against the VLAN register
                 * callback changing it underneath us.
                 */
                spin_lock(&tp->state_lock);
                if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
                        vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
                                                 ntohl(rx->vlanTag) & 0xffff);
                else
                        netif_receive_skb(new_skb);
                spin_unlock(&tp->state_lock);

                received++;
                budget--;
        }
        *cleared = cpu_to_le32(rxaddr);

        return received;
}
1762
1763 static void
1764 typhoon_fill_free_ring(struct typhoon *tp)
1765 {
1766         u32 i;
1767
1768         for(i = 0; i < RXENT_ENTRIES; i++) {
1769                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1770                 if(rxb->skb)
1771                         continue;
1772                 if(typhoon_alloc_rx_skb(tp, i) < 0)
1773                         break;
1774         }
1775 }
1776
/* NAPI poll routine: drain pending command responses, reap completed
 * Tx descriptors, and receive from both rx rings within @budget. Also
 * refills the rx free ring when it has run dry. Interrupts are only
 * unmasked again once a pass completes under budget.
 *
 * Returns the number of rx packets processed (work_done).
 */
static int
typhoon_poll(struct napi_struct *napi, int budget)
{
        struct typhoon *tp = container_of(napi, struct typhoon, napi);
        struct typhoon_indexes *indexes = tp->indexes;
        int work_done;

        /* Order our reads of the shared indexes page against the
         * card's writes to it.
         */
        rmb();
        if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
                        typhoon_process_response(tp, 0, NULL);

        if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
                typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

        work_done = 0;

        /* The Hi ring is serviced first; the Lo ring gets whatever
         * budget is left over.
         */
        if(indexes->rxHiCleared != indexes->rxHiReady) {
                work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
                                        &indexes->rxHiCleared, budget);
        }

        if(indexes->rxLoCleared != indexes->rxLoReady) {
                work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
                                        &indexes->rxLoCleared, budget - work_done);
        }

        if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
                /* rxBuff ring is empty, try to fill it. */
                typhoon_fill_free_ring(tp);
        }

        if (work_done < budget) {
                napi_complete(napi);
                /* Re-enable interrupts now that we are caught up. */
                iowrite32(TYPHOON_INTR_NONE,
                                tp->ioaddr + TYPHOON_REG_INTR_MASK);
                typhoon_post_pci_writes(tp->ioaddr);
        }

        return work_done;
}
1817
1818 static irqreturn_t
1819 typhoon_interrupt(int irq, void *dev_instance)
1820 {
1821         struct net_device *dev = dev_instance;
1822         struct typhoon *tp = netdev_priv(dev);
1823         void __iomem *ioaddr = tp->ioaddr;
1824         u32 intr_status;
1825
1826         intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
1827         if(!(intr_status & TYPHOON_INTR_HOST_INT))
1828                 return IRQ_NONE;
1829
1830         iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
1831
1832         if (napi_schedule_prep(&tp->napi)) {
1833                 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
1834                 typhoon_post_pci_writes(ioaddr);
1835                 __napi_schedule(&tp->napi);
1836         } else {
1837                 printk(KERN_ERR "%s: Error, poll already scheduled\n",
1838                        dev->name);
1839         }
1840         return IRQ_HANDLED;
1841 }
1842
1843 static void
1844 typhoon_free_rx_rings(struct typhoon *tp)
1845 {
1846         u32 i;
1847
1848         for(i = 0; i < RXENT_ENTRIES; i++) {
1849                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1850                 if(rxb->skb) {
1851                         pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1852                                        PCI_DMA_FROMDEVICE);
1853                         dev_kfree_skb(rxb->skb);
1854                         rxb->skb = NULL;
1855                 }
1856         }
1857 }
1858
/* Put the 3XP to sleep and move the PCI device into @state.
 *
 * @events: TYPHOON_WAKE_* bits the card should wake the host on.
 *
 * Returns 0 on success or a negative command/timeout error. On
 * success the PCI device is disabled, so typhoon_wakeup() must
 * re-enable and restore it later.
 */
static int
typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
{
        struct pci_dev *pdev = tp->pdev;
        void __iomem *ioaddr = tp->ioaddr;
        struct cmd_desc xp_cmd;
        int err;

        /* Arm the wake events before asking the card to go to sleep. */
        INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
        xp_cmd.parm1 = events;
        err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
        if(err < 0) {
                printk(KERN_ERR "%s: typhoon_sleep(): wake events cmd err %d\n",
                                tp->name, err);
                return err;
        }

        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
        err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
        if(err < 0) {
                printk(KERN_ERR "%s: typhoon_sleep(): sleep cmd err %d\n",
                                tp->name, err);
                return err;
        }

        if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
                return -ETIMEDOUT;

        /* Since we cannot monitor the status of the link while sleeping,
         * tell the world it went away.
         */
        netif_carrier_off(tp->dev);

        /* Enable PCI wake-up, then power the device down. */
        pci_enable_wake(tp->pdev, state, 1);
        pci_disable_device(pdev);
        return pci_set_power_state(pdev, state);
}
1896
1897 static int
1898 typhoon_wakeup(struct typhoon *tp, int wait_type)
1899 {
1900         struct pci_dev *pdev = tp->pdev;
1901         void __iomem *ioaddr = tp->ioaddr;
1902
1903         pci_set_power_state(pdev, PCI_D0);
1904         pci_restore_state(pdev);
1905
1906         /* Post 2.x.x versions of the Sleep Image require a reset before
1907          * we can download the Runtime Image. But let's not make users of
1908          * the old firmware pay for the reset.
1909          */
1910         iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
1911         if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
1912                         (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
1913                 return typhoon_reset(ioaddr, wait_type);
1914
1915         return 0;
1916 }
1917
1918 static int
1919 typhoon_start_runtime(struct typhoon *tp)
1920 {
1921         struct net_device *dev = tp->dev;
1922         void __iomem *ioaddr = tp->ioaddr;
1923         struct cmd_desc xp_cmd;
1924         int err;
1925
1926         typhoon_init_rings(tp);
1927         typhoon_fill_free_ring(tp);
1928
1929         err = typhoon_download_firmware(tp);
1930         if(err < 0) {
1931                 printk("%s: cannot load runtime on 3XP\n", tp->name);
1932                 goto error_out;
1933         }
1934
1935         if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
1936                 printk("%s: cannot boot 3XP\n", tp->name);
1937                 err = -EIO;
1938                 goto error_out;
1939         }
1940
1941         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
1942         xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
1943         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1944         if(err < 0)
1945                 goto error_out;
1946
1947         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
1948         xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
1949         xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
1950         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1951         if(err < 0)
1952                 goto error_out;
1953
1954         /* Disable IRQ coalescing -- we can reenable it when 3Com gives
1955          * us some more information on how to control it.
1956          */
1957         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
1958         xp_cmd.parm1 = 0;
1959         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1960         if(err < 0)
1961                 goto error_out;
1962
1963         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1964         xp_cmd.parm1 = tp->xcvr_select;
1965         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1966         if(err < 0)
1967                 goto error_out;
1968
1969         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
1970         xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
1971         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1972         if(err < 0)
1973                 goto error_out;
1974
1975         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
1976         spin_lock_bh(&tp->state_lock);
1977         xp_cmd.parm2 = tp->offload;
1978         xp_cmd.parm3 = tp->offload;
1979         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1980         spin_unlock_bh(&tp->state_lock);
1981         if(err < 0)
1982                 goto error_out;
1983
1984         typhoon_set_rx_mode(dev);
1985
1986         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
1987         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1988         if(err < 0)
1989                 goto error_out;
1990
1991         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
1992         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1993         if(err < 0)
1994                 goto error_out;
1995
1996         tp->card_state = Running;
1997         smp_wmb();
1998
1999         iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
2000         iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
2001         typhoon_post_pci_writes(ioaddr);
2002
2003         return 0;
2004
2005 error_out:
2006         typhoon_reset(ioaddr, WaitNoSleep);
2007         typhoon_free_rx_rings(tp);
2008         typhoon_init_rings(tp);
2009         return err;
2010 }
2011
/* Quiesce the running image: disable Rx, wait for Tx to drain, save
 * statistics, halt the 3XP, and reset it so it can be rebooted into
 * the sleep image. Any Tx descriptors still outstanding after the
 * reset are force-cleaned.
 *
 * @wait_type: passed through to typhoon_reset().
 *
 * Returns 0, or -ETIMEDOUT if the final reset fails.
 */
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
        struct typhoon_indexes *indexes = tp->indexes;
        struct transmit_ring *txLo = &tp->txLoRing;
        void __iomem *ioaddr = tp->ioaddr;
        struct cmd_desc xp_cmd;
        int i;

        /* Disable interrupts early, since we can't schedule a poll
         * when called with !netif_running(). This will be posted
         * when we force the posting of the command.
         */
        iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
        typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

        /* Wait 1/2 sec for any outstanding transmits to occur
         * We'll cleanup after the reset if this times out.
         */
        for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
                if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
                        break;
                udelay(TYPHOON_UDELAY);
        }

        if(i == TYPHOON_WAIT_TIMEOUT)
                printk(KERN_ERR
                       "%s: halt timed out waiting for Tx to complete\n",
                       tp->name);

        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
        typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

        /* save the statistics so when we bring the interface up again,
         * the values reported to userspace are correct.
         */
        tp->card_state = Sleeping;
        smp_wmb();
        typhoon_do_get_stats(tp);
        memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));

        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
        typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

        /* Halt/reset failures are logged but not fatal here -- the
         * reset below is the real recovery point.
         */
        if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
                printk(KERN_ERR "%s: timed out waiting for 3XP to halt\n",
                       tp->name);

        if(typhoon_reset(ioaddr, wait_type) < 0) {
                printk(KERN_ERR "%s: unable to reset 3XP\n", tp->name);
                return -ETIMEDOUT;
        }

        /* cleanup any outstanding Tx packets */
        if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
                indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
                typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
        }

        return 0;
}
2075
2076 static void
2077 typhoon_tx_timeout(struct net_device *dev)
2078 {
2079         struct typhoon *tp = netdev_priv(dev);
2080
2081         if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
2082                 printk(KERN_WARNING "%s: could not reset in tx timeout\n",
2083                                         dev->name);
2084                 goto truely_dead;
2085         }
2086
2087         /* If we ever start using the Hi ring, it will need cleaning too */
2088         typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2089         typhoon_free_rx_rings(tp);
2090
2091         if(typhoon_start_runtime(tp) < 0) {
2092                 printk(KERN_ERR "%s: could not start runtime in tx timeout\n",
2093                                         dev->name);
2094                 goto truely_dead;
2095         }
2096
2097         netif_wake_queue(dev);
2098         return;
2099
2100 truely_dead:
2101         /* Reset the hardware, and turn off carrier to avoid more timeouts */
2102         typhoon_reset(tp->ioaddr, NoWait);
2103         netif_carrier_off(dev);
2104 }
2105
/* ndo_open: load the firmware, wake the card, grab the IRQ, and start
 * the runtime image. On failure, fall back into the sleep image (or
 * reset outright) so a later open can retry cleanly.
 */
static int
typhoon_open(struct net_device *dev)
{
        struct typhoon *tp = netdev_priv(dev);
        int err;

        err = typhoon_request_firmware(tp);
        if (err)
                goto out;

        err = typhoon_wakeup(tp, WaitSleep);
        if(err < 0) {
                printk(KERN_ERR "%s: unable to wakeup device\n", dev->name);
                goto out_sleep;
        }

        err = request_irq(dev->irq, &typhoon_interrupt, IRQF_SHARED,
                                dev->name, dev);
        if(err < 0)
                goto out_sleep;

        napi_enable(&tp->napi);

        err = typhoon_start_runtime(tp);
        if(err < 0) {
                napi_disable(&tp->napi);
                goto out_irq;
        }

        netif_start_queue(dev);
        return 0;

out_irq:
        free_irq(dev->irq, dev);

out_sleep:
        /* Try to leave the card in the low-power sleep image; if even
         * that fails, just reset it and bail.
         */
        if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
                printk(KERN_ERR "%s: unable to reboot into sleep img\n",
                                dev->name);
                typhoon_reset(tp->ioaddr, NoWait);
                goto out;
        }

        if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
                printk(KERN_ERR "%s: unable to go back to sleep\n", dev->name);

out:
        return err;
}
2155
/* ndo_stop: stop the runtime image, release the IRQ, free rx buffers,
 * and put the card back into the low-power sleep image. Failures are
 * logged but we always return 0 so the interface can go down.
 */
static int
typhoon_close(struct net_device *dev)
{
        struct typhoon *tp = netdev_priv(dev);

        netif_stop_queue(dev);
        napi_disable(&tp->napi);

        if(typhoon_stop_runtime(tp, WaitSleep) < 0)
                printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);

        /* Make sure there is no irq handler running on a different CPU. */
        free_irq(dev->irq, dev);

        typhoon_free_rx_rings(tp);
        typhoon_init_rings(tp);

        if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
                printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);

        if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
                printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);

        return 0;
}
2181
2182 #ifdef CONFIG_PM
2183 static int
2184 typhoon_resume(struct pci_dev *pdev)
2185 {
2186         struct net_device *dev = pci_get_drvdata(pdev);
2187         struct typhoon *tp = netdev_priv(dev);
2188
2189         /* If we're down, resume when we are upped.
2190          */
2191         if(!netif_running(dev))
2192                 return 0;
2193
2194         if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
2195                 printk(KERN_ERR "%s: critical: could not wake up in resume\n",
2196                                 dev->name);
2197                 goto reset;
2198         }
2199
2200         if(typhoon_start_runtime(tp) < 0) {
2201                 printk(KERN_ERR "%s: critical: could not start runtime in "
2202                                 "resume\n", dev->name);
2203                 goto reset;
2204         }
2205
2206         netif_device_attach(dev);
2207         return 0;
2208
2209 reset:
2210         typhoon_reset(tp->ioaddr, NoWait);
2211         return -EBUSY;
2212 }
2213
/* PM suspend: stop the runtime, boot the sleep image, program the MAC
 * address and a directed+broadcast rx filter for wake-up matching,
 * then put the card to sleep armed with tp->wol_events.
 *
 * Returns 0 on success; on failure, attempts a resume and returns
 * -EBUSY.
 */
static int
typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct typhoon *tp = netdev_priv(dev);
        struct cmd_desc xp_cmd;

        /* If we're down, we're already suspended.
         */
        if(!netif_running(dev))
                return 0;

        /* Refuse the unsupported combination of magic-packet wake and
         * VLAN acceleration. state_lock guards tp->vlgrp.
         */
        spin_lock_bh(&tp->state_lock);
        if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
                spin_unlock_bh(&tp->state_lock);
                printk(KERN_ERR "%s: cannot do WAKE_MAGIC with VLANS\n",
                                dev->name);
                return -EBUSY;
        }
        spin_unlock_bh(&tp->state_lock);

        netif_device_detach(dev);

        if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
                printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
                goto need_resume;
        }

        typhoon_free_rx_rings(tp);
        typhoon_init_rings(tp);

        if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
                printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
                goto need_resume;
        }

        /* Reprogram the MAC address into the sleep image so directed
         * wake frames can be matched while asleep.
         */
        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
        xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
        xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
        if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
                printk(KERN_ERR "%s: unable to set mac address in suspend\n",
                                dev->name);
                goto need_resume;
        }

        INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
        xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
        if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
                printk(KERN_ERR "%s: unable to set rx filter in suspend\n",
                                dev->name);
                goto need_resume;
        }

        if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
                printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
                goto need_resume;
        }

        return 0;

need_resume:
        typhoon_resume(pdev);
        return -EBUSY;
}
2278 #endif
2279
/* Probe whether MMIO works on this card by raising a self-interrupt
 * through the memory-mapped registers and checking that it latches in
 * the interrupt status register.
 *
 * Returns 1 if MMIO should be used, 0 to fall back to port IO.
 */
static int __devinit
typhoon_test_mmio(struct pci_dev *pdev)
{
        void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
        int mode = 0;
        u32 val;

        if(!ioaddr)
                goto out;

        /* Only poke the card if it is still in the post-reset
         * "waiting for host" state.
         */
        if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
                                TYPHOON_STATUS_WAITING_FOR_HOST)
                goto out_unmap;

        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);

        /* Ok, see if we can change our interrupt status register by
         * sending ourselves an interrupt. If so, then MMIO works.
         * The 50usec delay is arbitrary -- it could probably be smaller.
         */
        val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
        if((val & TYPHOON_INTR_SELF) == 0) {
                iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
                ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
                udelay(50);
                val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
                if(val & TYPHOON_INTR_SELF)
                        mode = 1;
        }

        /* Put the interrupt registers back the way we found them. */
        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
        iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
        iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
        ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);

out_unmap:
        pci_iounmap(pdev, ioaddr);

out:
        if(!mode)
                printk(KERN_INFO PFX "falling back to port IO\n");
        return mode;
}
2325
/* Callbacks hooking this driver into the generic netdev layer. */
static const struct net_device_ops typhoon_netdev_ops = {
        .ndo_open               = typhoon_open,
        .ndo_stop               = typhoon_close,
        .ndo_start_xmit         = typhoon_start_tx,
        .ndo_set_multicast_list = typhoon_set_rx_mode,
        .ndo_tx_timeout         = typhoon_tx_timeout,
        .ndo_get_stats          = typhoon_get_stats,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = typhoon_set_mac_address,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_vlan_rx_register   = typhoon_vlan_rx_register,
};
2338
2339 static int __devinit
2340 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2341 {
2342         static int did_version = 0;
2343         struct net_device *dev;
2344         struct typhoon *tp;
2345         int card_id = (int) ent->driver_data;
2346         void __iomem *ioaddr;
2347         void *shared;
2348         dma_addr_t shared_dma;
2349         struct cmd_desc xp_cmd;
2350         struct resp_desc xp_resp[3];
2351         int err = 0;
2352
2353         if(!did_version++)
2354                 printk(KERN_INFO "%s", version);
2355
2356         dev = alloc_etherdev(sizeof(*tp));
2357         if(dev == NULL) {
2358                 printk(ERR_PFX "%s: unable to alloc new net device\n",
2359                        pci_name(pdev));
2360                 err = -ENOMEM;
2361                 goto error_out;
2362         }
2363         SET_NETDEV_DEV(dev, &pdev->dev);
2364
2365         err = pci_enable_device(pdev);
2366         if(err < 0) {
2367                 printk(ERR_PFX "%s: unable to enable device\n",
2368                        pci_name(pdev));
2369                 goto error_out_dev;
2370         }
2371
2372         err = pci_set_mwi(pdev);
2373         if(err < 0) {
2374                 printk(ERR_PFX "%s: unable to set MWI\n", pci_name(pdev));
2375                 goto error_out_disable;
2376         }
2377
2378         err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2379         if(err < 0) {
2380                 printk(ERR_PFX "%s: No usable DMA configuration\n",
2381                        pci_name(pdev));
2382                 goto error_out_mwi;
2383         }
2384
2385         /* sanity checks on IO and MMIO BARs
2386          */
2387         if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2388                 printk(ERR_PFX
2389                        "%s: region #1 not a PCI IO resource, aborting\n",
2390                        pci_name(pdev));
2391                 err = -ENODEV;
2392                 goto error_out_mwi;
2393         }
2394         if(pci_resource_len(pdev, 0) < 128) {
2395                 printk(ERR_PFX "%s: Invalid PCI IO region size, aborting\n",
2396                        pci_name(pdev));
2397                 err = -ENODEV;
2398                 goto error_out_mwi;
2399         }
2400         if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2401                 printk(ERR_PFX
2402                        "%s: region #1 not a PCI MMIO resource, aborting\n",
2403                        pci_name(pdev));
2404                 err = -ENODEV;
2405                 goto error_out_mwi;
2406         }
2407         if(pci_resource_len(pdev, 1) < 128) {
2408                 printk(ERR_PFX "%s: Invalid PCI MMIO region size, aborting\n",
2409                        pci_name(pdev));
2410                 err = -ENODEV;
2411                 goto error_out_mwi;
2412         }
2413
2414         err = pci_request_regions(pdev, "typhoon");
2415         if(err < 0) {
2416                 printk(ERR_PFX "%s: could not request regions\n",
2417                        pci_name(pdev));
2418                 goto error_out_mwi;
2419         }
2420
2421         /* map our registers
2422          */
2423         if(use_mmio != 0 && use_mmio != 1)
2424                 use_mmio = typhoon_test_mmio(pdev);
2425
2426         ioaddr = pci_iomap(pdev, use_mmio, 128);
2427         if (!ioaddr) {
2428                 printk(ERR_PFX "%s: cannot remap registers, aborting\n",
2429                        pci_name(pdev));
2430                 err = -EIO;
2431                 goto error_out_regions;
2432         }
2433
2434         /* allocate pci dma space for rx and tx descriptor rings
2435          */
2436         shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2437                                       &shared_dma);
2438         if(!shared) {
2439                 printk(ERR_PFX "%s: could not allocate DMA memory\n",
2440                        pci_name(pdev));
2441                 err = -ENOMEM;
2442                 goto error_out_remap;
2443         }
2444
2445         dev->irq = pdev->irq;
2446         tp = netdev_priv(dev);
2447         tp->shared = (struct typhoon_shared *) shared;
2448         tp->shared_dma = shared_dma;
2449         tp->pdev = pdev;
2450         tp->tx_pdev = pdev;
2451         tp->ioaddr = ioaddr;
2452         tp->tx_ioaddr = ioaddr;
2453         tp->dev = dev;
2454
2455         /* Init sequence:
2456          * 1) Reset the adapter to clear any bad juju
2457          * 2) Reload the sleep image
2458          * 3) Boot the sleep image
2459          * 4) Get the hardware address.
2460          * 5) Put the card to sleep.
2461          */
2462         if (typhoon_reset(ioaddr, WaitSleep) < 0) {
2463                 printk(ERR_PFX "%s: could not reset 3XP\n", pci_name(pdev));
2464                 err = -EIO;
2465                 goto error_out_dma;
2466         }
2467
2468         /* Now that we've reset the 3XP and are sure it's not going to
2469          * write all over memory, enable bus mastering, and save our
2470          * state for resuming after a suspend.
2471          */
2472         pci_set_master(pdev);
2473         pci_save_state(pdev);
2474
2475         /* dev->name is not valid until we register, but we need to
2476          * use some common routines to initialize the card. So that those
2477          * routines print the right name, we keep our oun pointer to the name
2478          */
2479         tp->name = pci_name(pdev);
2480
2481         typhoon_init_interface(tp);
2482         typhoon_init_rings(tp);
2483
2484         if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2485                 printk(ERR_PFX "%s: cannot boot 3XP sleep image\n",
2486                        pci_name(pdev));
2487                 err = -EIO;
2488                 goto error_out_reset;
2489         }
2490
2491         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2492         if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2493                 printk(ERR_PFX "%s: cannot read MAC address\n",
2494                        pci_name(pdev));
2495                 err = -EIO;
2496                 goto error_out_reset;
2497         }
2498
2499         *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2500         *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2501
2502         if(!is_valid_ether_addr(dev->dev_addr)) {
2503                 printk(ERR_PFX "%s: Could not obtain valid ethernet address, "
2504                        "aborting\n", pci_name(pdev));
2505                 goto error_out_reset;
2506         }
2507
2508         /* Read the Sleep Image version last, so the response is valid
2509          * later when we print out the version reported.
2510          */
2511         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2512         if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2513                 printk(ERR_PFX "%s: Could not get Sleep Image version\n",
2514                         pci_name(pdev));
2515                 goto error_out_reset;
2516         }
2517
2518         tp->capabilities = typhoon_card_info[card_id].capabilities;
2519         tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2520
2521         /* Typhoon 1.0 Sleep Images return one response descriptor to the
2522          * READ_VERSIONS command. Those versions are OK after waking up
2523          * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2524          * seem to need a little extra help to get started. Since we don't
2525          * know how to nudge it along, just kick it.
2526          */
2527         if(xp_resp[0].numDesc != 0)
2528                 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2529
2530         if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
2531                 printk(ERR_PFX "%s: cannot put adapter to sleep\n",
2532                        pci_name(pdev));
2533                 err = -EIO;
2534                 goto error_out_reset;
2535         }
2536
2537         /* The chip-specific entries in the device structure. */
2538         dev->netdev_ops         = &typhoon_netdev_ops;
2539         netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
2540         dev->watchdog_timeo     = TX_TIMEOUT;
2541
2542         SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
2543
2544         /* We can handle scatter gather, up to 16 entries, and
2545          * we can do IP checksumming (only version 4, doh...)
2546          */
2547         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2548         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2549         dev->features |= NETIF_F_TSO;
2550
2551         if(register_netdev(dev) < 0)
2552                 goto error_out_reset;
2553
2554         /* fixup our local name */
2555         tp->name = dev->name;
2556
2557         pci_set_drvdata(pdev, dev);
2558
2559         printk(KERN_INFO "%s: %s at %s 0x%llx, %pM\n",
2560                dev->name, typhoon_card_info[card_id].name,
2561                use_mmio ? "MMIO" : "IO",
2562                (unsigned long long)pci_resource_start(pdev, use_mmio),
2563                dev->dev_addr);
2564
2565         /* xp_resp still contains the response to the READ_VERSIONS command.
2566          * For debugging, let the user know what version he has.
2567          */
2568         if(xp_resp[0].numDesc == 0) {
2569                 /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2570                  * of version is Month/Day of build.
2571                  */
2572                 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2573                 printk(KERN_INFO "%s: Typhoon 1.0 Sleep Image built "
2574                         "%02u/%02u/2000\n", dev->name, monthday >> 8,
2575                         monthday & 0xff);
2576         } else if(xp_resp[0].numDesc == 2) {
2577                 /* This is the Typhoon 1.1+ type Sleep Image
2578                  */
2579                 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2580                 u8 *ver_string = (u8 *) &xp_resp[1];
2581                 ver_string[25] = 0;
2582                 printk(KERN_INFO "%s: Typhoon 1.1+ Sleep Image version "
2583                         "%02x.%03x.%03x %s\n", dev->name, sleep_ver >> 24,
2584                         (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff,
2585                         ver_string);
2586         } else {
2587                 printk(KERN_WARNING "%s: Unknown Sleep Image version "
2588                         "(%u:%04x)\n", dev->name, xp_resp[0].numDesc,
2589                         le32_to_cpu(xp_resp[0].parm2));
2590         }
2591
2592         return 0;
2593
2594 error_out_reset:
2595         typhoon_reset(ioaddr, NoWait);
2596
2597 error_out_dma:
2598         pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2599                             shared, shared_dma);
2600 error_out_remap:
2601         pci_iounmap(pdev, ioaddr);
2602 error_out_regions:
2603         pci_release_regions(pdev);
2604 error_out_mwi:
2605         pci_clear_mwi(pdev);
2606 error_out_disable:
2607         pci_disable_device(pdev);
2608 error_out_dev:
2609         free_netdev(dev);
2610 error_out:
2611         return err;
2612 }
2613
/* PCI remove callback: undo typhoon_init_one() in reverse order.
 * The device must be woken (D0) and its config space restored before
 * the reset, since the card may have been left asleep in D3hot.
 */
static void __devexit
typhoon_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	unregister_netdev(dev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* Quiesce the 3XP before releasing its DMA memory; NoWait since
	 * we do not care when it finishes coming back up.
	 */
	typhoon_reset(tp->ioaddr, NoWait);
	pci_iounmap(pdev, tp->ioaddr);
	pci_free_consistent(pdev, sizeof(struct typhoon_shared),
			    tp->shared, tp->shared_dma);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}
2633
/* PCI driver description; suspend/resume are only hooked when power
 * management support is configured into the kernel.
 */
static struct pci_driver typhoon_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= typhoon_pci_tbl,
	.probe		= typhoon_init_one,
	.remove		= __devexit_p(typhoon_remove_one),
#ifdef CONFIG_PM
	.suspend	= typhoon_suspend,
	.resume		= typhoon_resume,
#endif
};
2644
2645 static int __init
2646 typhoon_init(void)
2647 {
2648         return pci_register_driver(&typhoon_driver);
2649 }
2650
2651 static void __exit
2652 typhoon_cleanup(void)
2653 {
2654         if (typhoon_fw) {
2655                 kfree(typhoon_fw_image);
2656                 release_firmware(typhoon_fw);
2657         }
2658         pci_unregister_driver(&typhoon_driver);
2659 }
2660
2661 module_init(typhoon_init);
2662 module_exit(typhoon_cleanup);