[TG3]: Add 5787 nvram support
[pandora-kernel.git] drivers/net/tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mii.h>
36 #include <linux/if_vlan.h>
37 #include <linux/ip.h>
38 #include <linux/tcp.h>
39 #include <linux/workqueue.h>
40 #include <linux/prefetch.h>
41 #include <linux/dma-mapping.h>
42
43 #include <net/checksum.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC64
51 #include <asm/idprom.h>
52 #include <asm/oplib.h>
53 #include <asm/pbm.h>
54 #endif
55
56 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
57 #define TG3_VLAN_TAG_USED 1
58 #else
59 #define TG3_VLAN_TAG_USED 0
60 #endif
61
62 #ifdef NETIF_F_TSO
63 #define TG3_TSO_SUPPORT 1
64 #else
65 #define TG3_TSO_SUPPORT 0
66 #endif
67
68 #include "tg3.h"
69
70 #define DRV_MODULE_NAME         "tg3"
71 #define PFX DRV_MODULE_NAME     ": "
72 #define DRV_MODULE_VERSION      "3.51"
73 #define DRV_MODULE_RELDATE      "Feb 21, 2006"
74
75 #define TG3_DEF_MAC_MODE        0
76 #define TG3_DEF_RX_MODE         0
77 #define TG3_DEF_TX_MODE         0
78 #define TG3_DEF_MSG_ENABLE        \
79         (NETIF_MSG_DRV          | \
80          NETIF_MSG_PROBE        | \
81          NETIF_MSG_LINK         | \
82          NETIF_MSG_TIMER        | \
83          NETIF_MSG_IFDOWN       | \
84          NETIF_MSG_IFUP         | \
85          NETIF_MSG_RX_ERR       | \
86          NETIF_MSG_TX_ERR)
87
88 /* length of time before we decide the hardware is borked,
89  * and dev->tx_timeout() should be called to fix the problem
90  */
91 #define TG3_TX_TIMEOUT                  (5 * HZ)
92
93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU                     60
95 #define TG3_MAX_MTU(tp) \
96         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97
98 /* These numbers seem to be hard coded in the NIC firmware somehow.
99  * You can't change the ring sizes, but you can change where you place
100  * them in the NIC onboard memory.
101  */
102 #define TG3_RX_RING_SIZE                512
103 #define TG3_DEF_RX_RING_PENDING         200
104 #define TG3_RX_JUMBO_RING_SIZE          256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
106
107 /* Do not place this n-ring entries value into the tp struct itself,
108  * we really want to expose these constants to GCC so that modulo et
109  * al.  operations are done with shifts and masks instead of with
110  * hw multiply/modulo instructions.  Another solution would be to
111  * replace things like '% foo' with '& (foo - 1)'.
112  */
113 #define TG3_RX_RCB_RING_SIZE(tp)        \
114         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
115
116 #define TG3_TX_RING_SIZE                512
117 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
118
119 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
120                                  TG3_RX_RING_SIZE)
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122                                  TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124                                    TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
126                                  TG3_TX_RING_SIZE)
127 #define TX_BUFFS_AVAIL(TP)                                              \
128         ((TP)->tx_pending -                                             \
129          (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
130 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
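/* Worked example of the mask arithmetic: because TG3_TX_RING_SIZE is a
 * power of two (512), '% TG3_TX_RING_SIZE' is equivalent to
 * '& (TG3_TX_RING_SIZE - 1)', which is what NEXT_TX() uses.  If, say,
 * tx_prod == 5 and tx_cons == 510, then (5 - 510) & 511 == 7 descriptors
 * are outstanding, so TX_BUFFS_AVAIL() reports tx_pending - 7 free slots
 * even though the producer index has wrapped around the ring.
 */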
131
132 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
134
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
137
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
141 #define TG3_NUM_TEST            6
142
143 static char version[] __devinitdata =
144         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION);
150
151 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154
155 static struct pci_device_id tg3_pci_tbl[] = {
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
241           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
243           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
245           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
246         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
247           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
248         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
249           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
250         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
251           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
252         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
253           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
254         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
255           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
256         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
257           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
258         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
259           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
260         { 0, }
261 };
262
263 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
264
265 static struct {
266         const char string[ETH_GSTRING_LEN];
267 } ethtool_stats_keys[TG3_NUM_STATS] = {
268         { "rx_octets" },
269         { "rx_fragments" },
270         { "rx_ucast_packets" },
271         { "rx_mcast_packets" },
272         { "rx_bcast_packets" },
273         { "rx_fcs_errors" },
274         { "rx_align_errors" },
275         { "rx_xon_pause_rcvd" },
276         { "rx_xoff_pause_rcvd" },
277         { "rx_mac_ctrl_rcvd" },
278         { "rx_xoff_entered" },
279         { "rx_frame_too_long_errors" },
280         { "rx_jabbers" },
281         { "rx_undersize_packets" },
282         { "rx_in_length_errors" },
283         { "rx_out_length_errors" },
284         { "rx_64_or_less_octet_packets" },
285         { "rx_65_to_127_octet_packets" },
286         { "rx_128_to_255_octet_packets" },
287         { "rx_256_to_511_octet_packets" },
288         { "rx_512_to_1023_octet_packets" },
289         { "rx_1024_to_1522_octet_packets" },
290         { "rx_1523_to_2047_octet_packets" },
291         { "rx_2048_to_4095_octet_packets" },
292         { "rx_4096_to_8191_octet_packets" },
293         { "rx_8192_to_9022_octet_packets" },
294
295         { "tx_octets" },
296         { "tx_collisions" },
297
298         { "tx_xon_sent" },
299         { "tx_xoff_sent" },
300         { "tx_flow_control" },
301         { "tx_mac_errors" },
302         { "tx_single_collisions" },
303         { "tx_mult_collisions" },
304         { "tx_deferred" },
305         { "tx_excessive_collisions" },
306         { "tx_late_collisions" },
307         { "tx_collide_2times" },
308         { "tx_collide_3times" },
309         { "tx_collide_4times" },
310         { "tx_collide_5times" },
311         { "tx_collide_6times" },
312         { "tx_collide_7times" },
313         { "tx_collide_8times" },
314         { "tx_collide_9times" },
315         { "tx_collide_10times" },
316         { "tx_collide_11times" },
317         { "tx_collide_12times" },
318         { "tx_collide_13times" },
319         { "tx_collide_14times" },
320         { "tx_collide_15times" },
321         { "tx_ucast_packets" },
322         { "tx_mcast_packets" },
323         { "tx_bcast_packets" },
324         { "tx_carrier_sense_errors" },
325         { "tx_discards" },
326         { "tx_errors" },
327
328         { "dma_writeq_full" },
329         { "dma_write_prioq_full" },
330         { "rxbds_empty" },
331         { "rx_discards" },
332         { "rx_errors" },
333         { "rx_threshold_hit" },
334
335         { "dma_readq_full" },
336         { "dma_read_prioq_full" },
337         { "tx_comp_queue_full" },
338
339         { "ring_set_send_prod_index" },
340         { "ring_status_update" },
341         { "nic_irqs" },
342         { "nic_avoided_irqs" },
343         { "nic_tx_threshold_hit" }
344 };
345
346 static struct {
347         const char string[ETH_GSTRING_LEN];
348 } ethtool_test_keys[TG3_NUM_TEST] = {
349         { "nvram test     (online) " },
350         { "link test      (online) " },
351         { "register test  (offline)" },
352         { "memory test    (offline)" },
353         { "loopback test  (offline)" },
354         { "interrupt test (offline)" },
355 };
356
357 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
358 {
359         writel(val, tp->regs + off);
360 }
361
362 static u32 tg3_read32(struct tg3 *tp, u32 off)
363 {
364         return readl(tp->regs + off);
365 }
366
367 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
368 {
369         unsigned long flags;
370
371         spin_lock_irqsave(&tp->indirect_lock, flags);
372         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
373         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
374         spin_unlock_irqrestore(&tp->indirect_lock, flags);
375 }
376
377 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
378 {
379         writel(val, tp->regs + off);
380         readl(tp->regs + off);
381 }
382
383 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
384 {
385         unsigned long flags;
386         u32 val;
387
388         spin_lock_irqsave(&tp->indirect_lock, flags);
389         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
390         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
391         spin_unlock_irqrestore(&tp->indirect_lock, flags);
392         return val;
393 }
394
395 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
396 {
397         unsigned long flags;
398
399         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
400                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
401                                        TG3_64BIT_REG_LOW, val);
402                 return;
403         }
404         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
405                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
406                                        TG3_64BIT_REG_LOW, val);
407                 return;
408         }
409
410         spin_lock_irqsave(&tp->indirect_lock, flags);
411         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
412         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
413         spin_unlock_irqrestore(&tp->indirect_lock, flags);
414
415         /* In indirect mode when disabling interrupts, we also need
416          * to clear the interrupt bit in the GRC local ctrl register.
417          */
418         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
419             (val == 0x1)) {
420                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
421                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
422         }
423 }
424
425 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
426 {
427         unsigned long flags;
428         u32 val;
429
430         spin_lock_irqsave(&tp->indirect_lock, flags);
431         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
432         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
433         spin_unlock_irqrestore(&tp->indirect_lock, flags);
434         return val;
435 }
436
437 /* usec_wait specifies the wait time in usec when writing to certain registers
438  * where it is unsafe to read back the register without some delay.
439  * GRC_LOCAL_CTRL is one such register when the GPIOs are toggled to switch power.
440  * TG3PCI_CLOCK_CTRL is another when the clock frequencies are changed.
441  */
442 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
443 {
444         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
445             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
446                 /* Non-posted methods */
447                 tp->write32(tp, off, val);
448         else {
449                 /* Posted method */
450                 tg3_write32(tp, off, val);
451                 if (usec_wait)
452                         udelay(usec_wait);
453                 tp->read32(tp, off);
454         }
455         /* Wait again after the read for the posted method to guarantee that
456          * the wait time is met.
457          */
458         if (usec_wait)
459                 udelay(usec_wait);
460 }
461
462 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
463 {
464         tp->write32_mbox(tp, off, val);
465         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
466             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
467                 tp->read32_mbox(tp, off);
468 }
469
470 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
471 {
472         void __iomem *mbox = tp->regs + off;
473         writel(val, mbox);
474         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
475                 writel(val, mbox);
476         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
477                 readl(mbox);
478 }
479
480 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
481 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
482 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
483 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
484 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
485
486 #define tw32(reg,val)           tp->write32(tp, reg, val)
487 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
488 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
489 #define tr32(reg)               tp->read32(tp, reg)
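/* Illustrative use of the flush variants defined above: tw32_f() posts the
 * write and flushes it with an immediate read-back (or falls back to the
 * non-posted indirect method on chips with the PCI-X target or ICH
 * workarounds), while tw32_wait_f() additionally enforces a delay, e.g.
 * tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40) in tg3_switch_clocks()
 * guarantees 40 usec after a clock change, as described in the usec_wait
 * comment above _tw32_flush().
 */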
490
491 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
492 {
493         unsigned long flags;
494
495         spin_lock_irqsave(&tp->indirect_lock, flags);
496         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
497         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
498
499         /* Always leave this as zero. */
500         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
501         spin_unlock_irqrestore(&tp->indirect_lock, flags);
502 }
503
504 static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
505 {
506         /* If no workaround is needed, write to mem space directly */
507         if (tp->write32 != tg3_write_indirect_reg32)
508                 tw32(NIC_SRAM_WIN_BASE + off, val);
509         else
510                 tg3_write_mem(tp, off, val);
511 }
512
513 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
514 {
515         unsigned long flags;
516
517         spin_lock_irqsave(&tp->indirect_lock, flags);
518         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
519         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
520
521         /* Always leave this as zero. */
522         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
523         spin_unlock_irqrestore(&tp->indirect_lock, flags);
524 }
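/* Note on the helpers above: NIC on-board SRAM is reached through a PCI
 * config-space window -- the target offset is latched into
 * TG3PCI_MEM_WIN_BASE_ADDR and the 32-bit value then moves through
 * TG3PCI_MEM_WIN_DATA, with indirect_lock serializing the two config cycles
 * against the indirect register accessors.  tg3_write_mem_fast() bypasses
 * the config window and writes through the NIC_SRAM_WIN_BASE aperture when
 * no indirect-access workaround is in effect.
 */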
525
526 static void tg3_disable_ints(struct tg3 *tp)
527 {
528         tw32(TG3PCI_MISC_HOST_CTRL,
529              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
530         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
531 }
532
533 static inline void tg3_cond_int(struct tg3 *tp)
534 {
535         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
536             (tp->hw_status->status & SD_STATUS_UPDATED))
537                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
538 }
539
540 static void tg3_enable_ints(struct tg3 *tp)
541 {
542         tp->irq_sync = 0;
543         wmb();
544
545         tw32(TG3PCI_MISC_HOST_CTRL,
546              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
547         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
548                        (tp->last_tag << 24));
549         tg3_cond_int(tp);
550 }
551
552 static inline unsigned int tg3_has_work(struct tg3 *tp)
553 {
554         struct tg3_hw_status *sblk = tp->hw_status;
555         unsigned int work_exists = 0;
556
557         /* check for phy events */
558         if (!(tp->tg3_flags &
559               (TG3_FLAG_USE_LINKCHG_REG |
560                TG3_FLAG_POLL_SERDES))) {
561                 if (sblk->status & SD_STATUS_LINK_CHG)
562                         work_exists = 1;
563         }
564         /* check for RX/TX work to do */
565         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
566             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
567                 work_exists = 1;
568
569         return work_exists;
570 }
571
572 /* tg3_restart_ints
573  *  similar to tg3_enable_ints, but it accurately determines whether there
574  *  is new work pending and can return without flushing the PIO write
575  *  which reenables interrupts.
576  */
577 static void tg3_restart_ints(struct tg3 *tp)
578 {
579         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
580                      tp->last_tag << 24);
581         mmiowb();
582
583         /* When doing tagged status, this work check is unnecessary.
584          * The last_tag we write above tells the chip which piece of
585          * work we've completed.
586          */
587         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
588             tg3_has_work(tp))
589                 tw32(HOSTCC_MODE, tp->coalesce_mode |
590                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
591 }
592
593 static inline void tg3_netif_stop(struct tg3 *tp)
594 {
595         tp->dev->trans_start = jiffies; /* prevent tx timeout */
596         netif_poll_disable(tp->dev);
597         netif_tx_disable(tp->dev);
598 }
599
600 static inline void tg3_netif_start(struct tg3 *tp)
601 {
602         netif_wake_queue(tp->dev);
603         /* NOTE: unconditional netif_wake_queue is only appropriate
604          * so long as all callers are assured to have free tx slots
605          * (such as after tg3_init_hw)
606          */
607         netif_poll_enable(tp->dev);
608         tp->hw_status->status |= SD_STATUS_UPDATED;
609         tg3_enable_ints(tp);
610 }
611
612 static void tg3_switch_clocks(struct tg3 *tp)
613 {
614         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
615         u32 orig_clock_ctrl;
616
617         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
618                 return;
619
620         orig_clock_ctrl = clock_ctrl;
621         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
622                        CLOCK_CTRL_CLKRUN_OENABLE |
623                        0x1f);
624         tp->pci_clock_ctrl = clock_ctrl;
625
626         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
627                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
628                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
629                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
630                 }
631         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
632                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
633                             clock_ctrl |
634                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
635                             40);
636                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
637                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
638                             40);
639         }
640         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
641 }
642
643 #define PHY_BUSY_LOOPS  5000
644
645 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
646 {
647         u32 frame_val;
648         unsigned int loops;
649         int ret;
650
651         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
652                 tw32_f(MAC_MI_MODE,
653                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
654                 udelay(80);
655         }
656
657         *val = 0x0;
658
659         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
660                       MI_COM_PHY_ADDR_MASK);
661         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
662                       MI_COM_REG_ADDR_MASK);
663         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
664         
665         tw32_f(MAC_MI_COM, frame_val);
666
667         loops = PHY_BUSY_LOOPS;
668         while (loops != 0) {
669                 udelay(10);
670                 frame_val = tr32(MAC_MI_COM);
671
672                 if ((frame_val & MI_COM_BUSY) == 0) {
673                         udelay(5);
674                         frame_val = tr32(MAC_MI_COM);
675                         break;
676                 }
677                 loops -= 1;
678         }
679
680         ret = -EBUSY;
681         if (loops != 0) {
682                 *val = frame_val & MI_COM_DATA_MASK;
683                 ret = 0;
684         }
685
686         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
687                 tw32_f(MAC_MI_MODE, tp->mi_mode);
688                 udelay(80);
689         }
690
691         return ret;
692 }
693
694 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
695 {
696         u32 frame_val;
697         unsigned int loops;
698         int ret;
699
700         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
701                 tw32_f(MAC_MI_MODE,
702                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
703                 udelay(80);
704         }
705
706         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
707                       MI_COM_PHY_ADDR_MASK);
708         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
709                       MI_COM_REG_ADDR_MASK);
710         frame_val |= (val & MI_COM_DATA_MASK);
711         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
712         
713         tw32_f(MAC_MI_COM, frame_val);
714
715         loops = PHY_BUSY_LOOPS;
716         while (loops != 0) {
717                 udelay(10);
718                 frame_val = tr32(MAC_MI_COM);
719                 if ((frame_val & MI_COM_BUSY) == 0) {
720                         udelay(5);
721                         frame_val = tr32(MAC_MI_COM);
722                         break;
723                 }
724                 loops -= 1;
725         }
726
727         ret = -EBUSY;
728         if (loops != 0)
729                 ret = 0;
730
731         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
732                 tw32_f(MAC_MI_MODE, tp->mi_mode);
733                 udelay(80);
734         }
735
736         return ret;
737 }
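/* Sketch of the MI (MDIO) transaction format assumed by the two helpers
 * above: the PHY address and register number are shifted into the
 * MI_COM_PHY_ADDR/MI_COM_REG_ADDR fields of MAC_MI_COM, write data rides in
 * MI_COM_DATA_MASK, and MI_COM_START launches the transfer.  MI_COM_BUSY is
 * then polled at ~10 usec intervals for up to PHY_BUSY_LOOPS (5000)
 * iterations, roughly 50 ms worst case, before giving up with -EBUSY.
 */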
738
739 static void tg3_phy_set_wirespeed(struct tg3 *tp)
740 {
741         u32 val;
742
743         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
744                 return;
745
746         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
747             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
748                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
749                              (val | (1 << 15) | (1 << 4)));
750 }
751
752 static int tg3_bmcr_reset(struct tg3 *tp)
753 {
754         u32 phy_control;
755         int limit, err;
756
757         /* OK, reset it, and poll the BMCR_RESET bit until it
758          * clears or we time out.
759          */
760         phy_control = BMCR_RESET;
761         err = tg3_writephy(tp, MII_BMCR, phy_control);
762         if (err != 0)
763                 return -EBUSY;
764
765         limit = 5000;
766         while (limit--) {
767                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
768                 if (err != 0)
769                         return -EBUSY;
770
771                 if ((phy_control & BMCR_RESET) == 0) {
772                         udelay(40);
773                         break;
774                 }
775                 udelay(10);
776         }
777         if (limit < 0)
778                 return -EBUSY;
779
780         return 0;
781 }
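/* Note: BMCR_RESET is self-clearing, so the loop above simply gives the
 * PHY up to 5000 * 10 usec (about 50 ms) to finish its internal reset
 * before reporting -EBUSY.
 */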
782
783 static int tg3_wait_macro_done(struct tg3 *tp)
784 {
785         int limit = 100;
786
787         while (limit--) {
788                 u32 tmp32;
789
790                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
791                         if ((tmp32 & 0x1000) == 0)
792                                 break;
793                 }
794         }
795         if (limit < 0)
796                 return -EBUSY;
797
798         return 0;
799 }
800
801 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
802 {
803         static const u32 test_pat[4][6] = {
804         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
805         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
806         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
807         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
808         };
809         int chan;
810
811         for (chan = 0; chan < 4; chan++) {
812                 int i;
813
814                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
815                              (chan * 0x2000) | 0x0200);
816                 tg3_writephy(tp, 0x16, 0x0002);
817
818                 for (i = 0; i < 6; i++)
819                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
820                                      test_pat[chan][i]);
821
822                 tg3_writephy(tp, 0x16, 0x0202);
823                 if (tg3_wait_macro_done(tp)) {
824                         *resetp = 1;
825                         return -EBUSY;
826                 }
827
828                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
829                              (chan * 0x2000) | 0x0200);
830                 tg3_writephy(tp, 0x16, 0x0082);
831                 if (tg3_wait_macro_done(tp)) {
832                         *resetp = 1;
833                         return -EBUSY;
834                 }
835
836                 tg3_writephy(tp, 0x16, 0x0802);
837                 if (tg3_wait_macro_done(tp)) {
838                         *resetp = 1;
839                         return -EBUSY;
840                 }
841
842                 for (i = 0; i < 6; i += 2) {
843                         u32 low, high;
844
845                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
846                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
847                             tg3_wait_macro_done(tp)) {
848                                 *resetp = 1;
849                                 return -EBUSY;
850                         }
851                         low &= 0x7fff;
852                         high &= 0x000f;
853                         if (low != test_pat[chan][i] ||
854                             high != test_pat[chan][i+1]) {
855                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
856                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
857                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
858
859                                 return -EBUSY;
860                         }
861                 }
862         }
863
864         return 0;
865 }
866
867 static int tg3_phy_reset_chanpat(struct tg3 *tp)
868 {
869         int chan;
870
871         for (chan = 0; chan < 4; chan++) {
872                 int i;
873
874                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
875                              (chan * 0x2000) | 0x0200);
876                 tg3_writephy(tp, 0x16, 0x0002);
877                 for (i = 0; i < 6; i++)
878                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
879                 tg3_writephy(tp, 0x16, 0x0202);
880                 if (tg3_wait_macro_done(tp))
881                         return -EBUSY;
882         }
883
884         return 0;
885 }
886
887 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
888 {
889         u32 reg32, phy9_orig;
890         int retries, do_phy_reset, err;
891
892         retries = 10;
893         do_phy_reset = 1;
894         do {
895                 if (do_phy_reset) {
896                         err = tg3_bmcr_reset(tp);
897                         if (err)
898                                 return err;
899                         do_phy_reset = 0;
900                 }
901
902                 /* Disable transmitter and interrupt.  */
903                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
904                         continue;
905
906                 reg32 |= 0x3000;
907                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
908
909                 /* Set full-duplex, 1000 mbps.  */
910                 tg3_writephy(tp, MII_BMCR,
911                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
912
913                 /* Set to master mode.  */
914                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
915                         continue;
916
917                 tg3_writephy(tp, MII_TG3_CTRL,
918                              (MII_TG3_CTRL_AS_MASTER |
919                               MII_TG3_CTRL_ENABLE_AS_MASTER));
920
921                 /* Enable SM_DSP_CLOCK and 6dB.  */
922                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
923
924                 /* Block the PHY control access.  */
925                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
926                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
927
928                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
929                 if (!err)
930                         break;
931         } while (--retries);
932
933         err = tg3_phy_reset_chanpat(tp);
934         if (err)
935                 return err;
936
937         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
938         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
939
940         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
941         tg3_writephy(tp, 0x16, 0x0000);
942
943         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
944             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
945                 /* Set Extended packet length bit for jumbo frames */
946                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
947         }
948         else {
949                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
950         }
951
952         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
953
954         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
955                 reg32 &= ~0x3000;
956                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
957         } else if (!err)
958                 err = -EBUSY;
959
960         return err;
961 }
962
963 /* This will reset the tigon3 PHY unconditionally and then apply the
964  * chip-specific workarounds that must follow the reset.
965  */
966 static int tg3_phy_reset(struct tg3 *tp)
967 {
968         u32 phy_status;
969         int err;
970
971         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
972         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
973         if (err != 0)
974                 return -EBUSY;
975
976         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
977             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
978             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
979                 err = tg3_phy_reset_5703_4_5(tp);
980                 if (err)
981                         return err;
982                 goto out;
983         }
984
985         err = tg3_bmcr_reset(tp);
986         if (err)
987                 return err;
988
989 out:
990         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
991                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
992                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
993                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
994                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
995                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
996                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
997         }
998         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
999                 tg3_writephy(tp, 0x1c, 0x8d68);
1000                 tg3_writephy(tp, 0x1c, 0x8d68);
1001         }
1002         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1003                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1004                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1005                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1006                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1007                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1008                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1009                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1010                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1011         }
1012         /* Set Extended packet length bit (bit 14) on all chips
1013          * that support jumbo frames. */
1014         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1015                 /* Cannot do read-modify-write on 5401 */
1016                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1017         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1018                 u32 phy_reg;
1019
1020                 /* Set bit 14 with read-modify-write to preserve other bits */
1021                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1022                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1023                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1024         }
1025
1026         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1027          * jumbo frame transmission.
1028          */
1029         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1030                 u32 phy_reg;
1031
1032                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1033                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1034                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1035         }
1036
1037         tg3_phy_set_wirespeed(tp);
1038         return 0;
1039 }
1040
1041 static void tg3_frob_aux_power(struct tg3 *tp)
1042 {
1043         struct tg3 *tp_peer = tp;
1044
1045         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1046                 return;
1047
1048         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1049             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1050                 struct net_device *dev_peer;
1051
1052                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1053                 /* remove_one() may have been run on the peer. */
1054                 if (!dev_peer)
1055                         tp_peer = tp;
1056                 else
1057                         tp_peer = netdev_priv(dev_peer);
1058         }
1059
1060         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1061             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1062             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1063             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1064                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1065                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1066                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1067                                     (GRC_LCLCTRL_GPIO_OE0 |
1068                                      GRC_LCLCTRL_GPIO_OE1 |
1069                                      GRC_LCLCTRL_GPIO_OE2 |
1070                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1071                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1072                                     100);
1073                 } else {
1074                         u32 no_gpio2;
1075                         u32 grc_local_ctrl = 0;
1076
1077                         if (tp_peer != tp &&
1078                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1079                                 return;
1080
1081                         /* Workaround to prevent overdrawing Amps. */
1082                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1083                             ASIC_REV_5714) {
1084                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1085                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1086                                             grc_local_ctrl, 100);
1087                         }
1088
1089                         /* On 5753 and variants, GPIO2 cannot be used. */
1090                         no_gpio2 = tp->nic_sram_data_cfg &
1091                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1092
1093                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1094                                          GRC_LCLCTRL_GPIO_OE1 |
1095                                          GRC_LCLCTRL_GPIO_OE2 |
1096                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1097                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1098                         if (no_gpio2) {
1099                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1100                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1101                         }
1102                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1103                                                     grc_local_ctrl, 100);
1104
1105                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1106
1107                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1108                                                     grc_local_ctrl, 100);
1109
1110                         if (!no_gpio2) {
1111                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1112                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1113                                             grc_local_ctrl, 100);
1114                         }
1115                 }
1116         } else {
1117                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1118                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1119                         if (tp_peer != tp &&
1120                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1121                                 return;
1122
1123                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1124                                     (GRC_LCLCTRL_GPIO_OE1 |
1125                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1126
1127                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1128                                     GRC_LCLCTRL_GPIO_OE1, 100);
1129
1130                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1131                                     (GRC_LCLCTRL_GPIO_OE1 |
1132                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1133                 }
1134         }
1135 }
1136
1137 static int tg3_setup_phy(struct tg3 *, int);
1138
1139 #define RESET_KIND_SHUTDOWN     0
1140 #define RESET_KIND_INIT         1
1141 #define RESET_KIND_SUSPEND      2
1142
1143 static void tg3_write_sig_post_reset(struct tg3 *, int);
1144 static int tg3_halt_cpu(struct tg3 *, u32);
1145 static int tg3_nvram_lock(struct tg3 *);
1146 static void tg3_nvram_unlock(struct tg3 *);
1147
1148 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1149 {
1150         u32 misc_host_ctrl;
1151         u16 power_control, power_caps;
1152         int pm = tp->pm_cap;
1153
1154         /* Make sure register accesses (indirect or otherwise)
1155          * will function correctly.
1156          */
1157         pci_write_config_dword(tp->pdev,
1158                                TG3PCI_MISC_HOST_CTRL,
1159                                tp->misc_host_ctrl);
1160
1161         pci_read_config_word(tp->pdev,
1162                              pm + PCI_PM_CTRL,
1163                              &power_control);
1164         power_control |= PCI_PM_CTRL_PME_STATUS;
1165         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1166         switch (state) {
1167         case PCI_D0:
1168                 power_control |= 0;
1169                 pci_write_config_word(tp->pdev,
1170                                       pm + PCI_PM_CTRL,
1171                                       power_control);
1172                 udelay(100);    /* Delay after power state change */
1173
1174                 /* Switch out of Vaux if it is not a LOM */
1175                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1176                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1177
1178                 return 0;
1179
1180         case PCI_D1:
1181                 power_control |= 1;
1182                 break;
1183
1184         case PCI_D2:
1185                 power_control |= 2;
1186                 break;
1187
1188         case PCI_D3hot:
1189                 power_control |= 3;
1190                 break;
1191
1192         default:
1193                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1194                        "requested.\n",
1195                        tp->dev->name, state);
1196                 return -EINVAL;
1197         }
1198
1199         power_control |= PCI_PM_CTRL_PME_ENABLE;
1200
1201         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1202         tw32(TG3PCI_MISC_HOST_CTRL,
1203              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1204
1205         if (tp->link_config.phy_is_low_power == 0) {
1206                 tp->link_config.phy_is_low_power = 1;
1207                 tp->link_config.orig_speed = tp->link_config.speed;
1208                 tp->link_config.orig_duplex = tp->link_config.duplex;
1209                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1210         }
1211
1212         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1213                 tp->link_config.speed = SPEED_10;
1214                 tp->link_config.duplex = DUPLEX_HALF;
1215                 tp->link_config.autoneg = AUTONEG_ENABLE;
1216                 tg3_setup_phy(tp, 0);
1217         }
1218
1219         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1220                 int i;
1221                 u32 val;
1222
1223                 for (i = 0; i < 200; i++) {
1224                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1225                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1226                                 break;
1227                         msleep(1);
1228                 }
1229         }
1230         tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1231                                              WOL_DRV_STATE_SHUTDOWN |
1232                                              WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1233
1234         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1235
1236         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1237                 u32 mac_mode;
1238
1239                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1240                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1241                         udelay(40);
1242
1243                         mac_mode = MAC_MODE_PORT_MODE_MII;
1244
1245                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1246                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1247                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1248                 } else {
1249                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1250                 }
1251
1252                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1253                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1254
1255                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1256                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1257                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1258
1259                 tw32_f(MAC_MODE, mac_mode);
1260                 udelay(100);
1261
1262                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1263                 udelay(10);
1264         }
1265
1266         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1267             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1268              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1269                 u32 base_val;
1270
1271                 base_val = tp->pci_clock_ctrl;
1272                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1273                              CLOCK_CTRL_TXCLK_DISABLE);
1274
1275                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1276                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1277         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1278                 /* do nothing */
1279         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1280                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1281                 u32 newbits1, newbits2;
1282
1283                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1284                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1285                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1286                                     CLOCK_CTRL_TXCLK_DISABLE |
1287                                     CLOCK_CTRL_ALTCLK);
1288                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1289                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1290                         newbits1 = CLOCK_CTRL_625_CORE;
1291                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1292                 } else {
1293                         newbits1 = CLOCK_CTRL_ALTCLK;
1294                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1295                 }
1296
1297                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1298                             40);
1299
1300                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1301                             40);
1302
1303                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1304                         u32 newbits3;
1305
1306                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1307                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1308                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1309                                             CLOCK_CTRL_TXCLK_DISABLE |
1310                                             CLOCK_CTRL_44MHZ_CORE);
1311                         } else {
1312                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1313                         }
1314
1315                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1316                                     tp->pci_clock_ctrl | newbits3, 40);
1317                 }
1318         }
1319
1320         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1321             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1322                 /* Turn off the PHY */
1323                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1324                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1325                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1326                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1327                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
1328                                 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1329                 }
1330         }
1331
1332         tg3_frob_aux_power(tp);
1333
1334         /* Workaround for unstable PLL clock */
1335         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1336             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1337                 u32 val = tr32(0x7d00);
1338
1339                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1340                 tw32(0x7d00, val);
1341                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1342                         int err;
1343
1344                         err = tg3_nvram_lock(tp);
1345                         tg3_halt_cpu(tp, RX_CPU_BASE);
1346                         if (!err)
1347                                 tg3_nvram_unlock(tp);
1348                 }
1349         }
1350
1351         /* Finally, set the new power state. */
1352         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1353         udelay(100);    /* Delay after power state change */
1354
1355         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1356
1357         return 0;
1358 }
1359
1360 static void tg3_link_report(struct tg3 *tp)
1361 {
1362         if (!netif_carrier_ok(tp->dev)) {
1363                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1364         } else {
1365                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1366                        tp->dev->name,
1367                        (tp->link_config.active_speed == SPEED_1000 ?
1368                         1000 :
1369                         (tp->link_config.active_speed == SPEED_100 ?
1370                          100 : 10)),
1371                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1372                         "full" : "half"));
1373
1374                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1375                        "%s for RX.\n",
1376                        tp->dev->name,
1377                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1378                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1379         }
1380 }
1381
1382 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1383 {
1384         u32 new_tg3_flags = 0;
1385         u32 old_rx_mode = tp->rx_mode;
1386         u32 old_tx_mode = tp->tx_mode;
1387
1388         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1389
1390                 /* Convert 1000BaseX flow control bits to 1000BaseT
1391                  * bits before resolving flow control.
1392                  */
1393                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1394                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1395                                        ADVERTISE_PAUSE_ASYM);
1396                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1397
1398                         if (local_adv & ADVERTISE_1000XPAUSE)
1399                                 local_adv |= ADVERTISE_PAUSE_CAP;
1400                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1401                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1402                         if (remote_adv & LPA_1000XPAUSE)
1403                                 remote_adv |= LPA_PAUSE_CAP;
1404                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1405                                 remote_adv |= LPA_PAUSE_ASYM;
1406                 }
1407
1408                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1409                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1410                                 if (remote_adv & LPA_PAUSE_CAP)
1411                                         new_tg3_flags |=
1412                                                 (TG3_FLAG_RX_PAUSE |
1413                                                 TG3_FLAG_TX_PAUSE);
1414                                 else if (remote_adv & LPA_PAUSE_ASYM)
1415                                         new_tg3_flags |=
1416                                                 (TG3_FLAG_RX_PAUSE);
1417                         } else {
1418                                 if (remote_adv & LPA_PAUSE_CAP)
1419                                         new_tg3_flags |=
1420                                                 (TG3_FLAG_RX_PAUSE |
1421                                                 TG3_FLAG_TX_PAUSE);
1422                         }
1423                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1424                         if ((remote_adv & LPA_PAUSE_CAP) &&
1425                             (remote_adv & LPA_PAUSE_ASYM))
1426                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1427                 }
1428
1429                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1430                 tp->tg3_flags |= new_tg3_flags;
1431         } else {
1432                 new_tg3_flags = tp->tg3_flags;
1433         }
1434
1435         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1436                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1437         else
1438                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1439
1440         if (old_rx_mode != tp->rx_mode) {
1441                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1442         }
1443
1444         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1445                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1446         else
1447                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1448
1449         if (old_tx_mode != tp->tx_mode) {
1450                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1451         }
1452 }
1453
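     /* Decode the speed/duplex field of the PHY aux status register;
      * unrecognized values map to SPEED_INVALID/DUPLEX_INVALID.
      */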
1454 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1455 {
1456         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1457         case MII_TG3_AUX_STAT_10HALF:
1458                 *speed = SPEED_10;
1459                 *duplex = DUPLEX_HALF;
1460                 break;
1461
1462         case MII_TG3_AUX_STAT_10FULL:
1463                 *speed = SPEED_10;
1464                 *duplex = DUPLEX_FULL;
1465                 break;
1466
1467         case MII_TG3_AUX_STAT_100HALF:
1468                 *speed = SPEED_100;
1469                 *duplex = DUPLEX_HALF;
1470                 break;
1471
1472         case MII_TG3_AUX_STAT_100FULL:
1473                 *speed = SPEED_100;
1474                 *duplex = DUPLEX_FULL;
1475                 break;
1476
1477         case MII_TG3_AUX_STAT_1000HALF:
1478                 *speed = SPEED_1000;
1479                 *duplex = DUPLEX_HALF;
1480                 break;
1481
1482         case MII_TG3_AUX_STAT_1000FULL:
1483                 *speed = SPEED_1000;
1484                 *duplex = DUPLEX_FULL;
1485                 break;
1486
1487         default:
1488                 *speed = SPEED_INVALID;
1489                 *duplex = DUPLEX_INVALID;
1490                 break;
1491         }
1492 }
1493
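     /* Program the copper PHY advertisement from tp->link_config.
      * In low-power mode only 10Mb (plus 100Mb when WOL needs it) is
      * advertised; when a specific mode is forced, BMCR is written
      * directly after briefly dropping the link via loopback;
      * otherwise autonegotiation is (re)started.
      */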
1494 static void tg3_phy_copper_begin(struct tg3 *tp)
1495 {
1496         u32 new_adv;
1497         int i;
1498
1499         if (tp->link_config.phy_is_low_power) {
1500                 /* Entering low power mode.  Disable gigabit and
1501                  * 100baseT advertisements.
1502                  */
1503                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1504
1505                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1506                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1507                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1508                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1509
1510                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1511         } else if (tp->link_config.speed == SPEED_INVALID) {
1512                 tp->link_config.advertising =
1513                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1514                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1515                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1516                          ADVERTISED_Autoneg | ADVERTISED_MII);
1517
1518                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1519                         tp->link_config.advertising &=
1520                                 ~(ADVERTISED_1000baseT_Half |
1521                                   ADVERTISED_1000baseT_Full);
1522
1523                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1524                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1525                         new_adv |= ADVERTISE_10HALF;
1526                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1527                         new_adv |= ADVERTISE_10FULL;
1528                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1529                         new_adv |= ADVERTISE_100HALF;
1530                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1531                         new_adv |= ADVERTISE_100FULL;
1532                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1533
1534                 if (tp->link_config.advertising &
1535                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1536                         new_adv = 0;
1537                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1538                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1539                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1540                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1541                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1542                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1543                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1544                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1545                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1546                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1547                 } else {
1548                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1549                 }
1550         } else {
1551                 /* Asking for a specific link mode. */
1552                 if (tp->link_config.speed == SPEED_1000) {
1553                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1554                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1555
1556                         if (tp->link_config.duplex == DUPLEX_FULL)
1557                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1558                         else
1559                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1560                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1561                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1562                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1563                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1564                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1565                 } else {
1566                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1567
1568                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1569                         if (tp->link_config.speed == SPEED_100) {
1570                                 if (tp->link_config.duplex == DUPLEX_FULL)
1571                                         new_adv |= ADVERTISE_100FULL;
1572                                 else
1573                                         new_adv |= ADVERTISE_100HALF;
1574                         } else {
1575                                 if (tp->link_config.duplex == DUPLEX_FULL)
1576                                         new_adv |= ADVERTISE_10FULL;
1577                                 else
1578                                         new_adv |= ADVERTISE_10HALF;
1579                         }
1580                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1581                 }
1582         }
1583
1584         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1585             tp->link_config.speed != SPEED_INVALID) {
1586                 u32 bmcr, orig_bmcr;
1587
1588                 tp->link_config.active_speed = tp->link_config.speed;
1589                 tp->link_config.active_duplex = tp->link_config.duplex;
1590
1591                 bmcr = 0;
1592                 switch (tp->link_config.speed) {
1593                 default:
1594                 case SPEED_10:
1595                         break;
1596
1597                 case SPEED_100:
1598                         bmcr |= BMCR_SPEED100;
1599                         break;
1600
1601                 case SPEED_1000:
1602                         bmcr |= TG3_BMCR_SPEED1000;
1603                         break;
1604                 }
1605
1606                 if (tp->link_config.duplex == DUPLEX_FULL)
1607                         bmcr |= BMCR_FULLDPLX;
1608
1609                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1610                     (bmcr != orig_bmcr)) {
1611                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1612                         for (i = 0; i < 1500; i++) {
1613                                 u32 tmp;
1614
1615                                 udelay(10);
1616                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1617                                     tg3_readphy(tp, MII_BMSR, &tmp))
1618                                         continue;
1619                                 if (!(tmp & BMSR_LSTATUS)) {
1620                                         udelay(40);
1621                                         break;
1622                                 }
1623                         }
1624                         tg3_writephy(tp, MII_BMCR, bmcr);
1625                         udelay(40);
1626                 }
1627         } else {
1628                 tg3_writephy(tp, MII_BMCR,
1629                              BMCR_ANENABLE | BMCR_ANRESTART);
1630         }
1631 }
1632
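     /* BCM5401 DSP setup: turn off tap power management and set the
      * extended packet length bit through the DSP address/data
      * register pair (vendor-specific magic values).
      */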
1633 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1634 {
1635         int err;
1636
1637         /* Turn off tap power management. */
1638         /* Set Extended packet length bit */
1639         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1640
1641         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1642         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1643
1644         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1645         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1646
1647         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1648         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1649
1650         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1651         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1652
1653         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1654         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1655
1656         udelay(40);
1657
1658         return err;
1659 }
1660
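     /* Return 1 if the PHY currently advertises all 10/100 modes and,
      * unless the board is 10/100-only, both 1000BaseT modes.
      */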
1661 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1662 {
1663         u32 adv_reg, all_mask;
1664
1665         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1666                 return 0;
1667
1668         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1669                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1670         if ((adv_reg & all_mask) != all_mask)
1671                 return 0;
1672         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1673                 u32 tg3_ctrl;
1674
1675                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1676                         return 0;
1677
1678                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1679                             MII_TG3_CTRL_ADV_1000_FULL);
1680                 if ((tg3_ctrl & all_mask) != all_mask)
1681                         return 0;
1682         }
1683         return 1;
1684 }
1685
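     /* Establish the copper link: apply chip-specific PHY
      * workarounds, start or verify (auto)negotiation according to
      * tp->link_config, resolve flow control, and bring MAC_MODE and
      * the netif carrier state in line with the result.
      */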
1686 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1687 {
1688         int current_link_up;
1689         u32 bmsr, dummy;
1690         u16 current_speed;
1691         u8 current_duplex;
1692         int i, err;
1693
1694         tw32(MAC_EVENT, 0);
1695
1696         tw32_f(MAC_STATUS,
1697              (MAC_STATUS_SYNC_CHANGED |
1698               MAC_STATUS_CFG_CHANGED |
1699               MAC_STATUS_MI_COMPLETION |
1700               MAC_STATUS_LNKSTATE_CHANGED));
1701         udelay(40);
1702
1703         tp->mi_mode = MAC_MI_MODE_BASE;
1704         tw32_f(MAC_MI_MODE, tp->mi_mode);
1705         udelay(80);
1706
1707         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1708
1709         /* Some third-party PHYs need to be reset on link going
1710          * down.
1711          */
1712         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1713              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1714              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1715             netif_carrier_ok(tp->dev)) {
1716                 tg3_readphy(tp, MII_BMSR, &bmsr);
1717                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1718                     !(bmsr & BMSR_LSTATUS))
1719                         force_reset = 1;
1720         }
1721         if (force_reset)
1722                 tg3_phy_reset(tp);
1723
1724         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1725                 tg3_readphy(tp, MII_BMSR, &bmsr);
1726                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1727                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1728                         bmsr = 0;
1729
1730                 if (!(bmsr & BMSR_LSTATUS)) {
1731                         err = tg3_init_5401phy_dsp(tp);
1732                         if (err)
1733                                 return err;
1734
1735                         tg3_readphy(tp, MII_BMSR, &bmsr);
1736                         for (i = 0; i < 1000; i++) {
1737                                 udelay(10);
1738                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1739                                     (bmsr & BMSR_LSTATUS)) {
1740                                         udelay(40);
1741                                         break;
1742                                 }
1743                         }
1744
1745                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1746                             !(bmsr & BMSR_LSTATUS) &&
1747                             tp->link_config.active_speed == SPEED_1000) {
1748                                 err = tg3_phy_reset(tp);
1749                                 if (!err)
1750                                         err = tg3_init_5401phy_dsp(tp);
1751                                 if (err)
1752                                         return err;
1753                         }
1754                 }
1755         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1756                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1757                 /* 5701 {A0,B0} CRC bug workaround */
1758                 tg3_writephy(tp, 0x15, 0x0a75);
1759                 tg3_writephy(tp, 0x1c, 0x8c68);
1760                 tg3_writephy(tp, 0x1c, 0x8d68);
1761                 tg3_writephy(tp, 0x1c, 0x8c68);
1762         }
1763
1764         /* Clear pending interrupts... */
1765         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1766         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1767
1768         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1769                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1770         else
1771                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1772
1773         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1774             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1775                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1776                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1777                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1778                 else
1779                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1780         }
1781
1782         current_link_up = 0;
1783         current_speed = SPEED_INVALID;
1784         current_duplex = DUPLEX_INVALID;
1785
1786         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1787                 u32 val;
1788
1789                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1790                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1791                 if (!(val & (1 << 10))) {
1792                         val |= (1 << 10);
1793                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1794                         goto relink;
1795                 }
1796         }
1797
1798         bmsr = 0;
1799         for (i = 0; i < 100; i++) {
1800                 tg3_readphy(tp, MII_BMSR, &bmsr);
1801                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1802                     (bmsr & BMSR_LSTATUS))
1803                         break;
1804                 udelay(40);
1805         }
1806
1807         if (bmsr & BMSR_LSTATUS) {
1808                 u32 aux_stat, bmcr;
1809
1810                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1811                 for (i = 0; i < 2000; i++) {
1812                         udelay(10);
1813                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1814                             aux_stat)
1815                                 break;
1816                 }
1817
1818                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1819                                              &current_speed,
1820                                              &current_duplex);
1821
1822                 bmcr = 0;
1823                 for (i = 0; i < 200; i++) {
1824                         tg3_readphy(tp, MII_BMCR, &bmcr);
1825                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1826                                 continue;
1827                         if (bmcr && bmcr != 0x7fff)
1828                                 break;
1829                         udelay(10);
1830                 }
1831
1832                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1833                         if (bmcr & BMCR_ANENABLE) {
1834                                 current_link_up = 1;
1835
1836                                 /* Force autoneg restart if we are exiting
1837                                  * low power mode.
1838                                  */
1839                                 if (!tg3_copper_is_advertising_all(tp))
1840                                         current_link_up = 0;
1841                         } else {
1842                                 current_link_up = 0;
1843                         }
1844                 } else {
1845                         if (!(bmcr & BMCR_ANENABLE) &&
1846                             tp->link_config.speed == current_speed &&
1847                             tp->link_config.duplex == current_duplex) {
1848                                 current_link_up = 1;
1849                         } else {
1850                                 current_link_up = 0;
1851                         }
1852                 }
1853
1854                 tp->link_config.active_speed = current_speed;
1855                 tp->link_config.active_duplex = current_duplex;
1856         }
1857
1858         if (current_link_up == 1 &&
1859             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1860             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1861                 u32 local_adv, remote_adv;
1862
1863                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1864                         local_adv = 0;
1865                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1866
1867                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1868                         remote_adv = 0;
1869
1870                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1871
1872                 /* If we are not advertising full pause capability,
1873                  * something is wrong.  Bring the link down and reconfigure.
1874                  */
1875                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1876                         current_link_up = 0;
1877                 } else {
1878                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1879                 }
1880         }
1881 relink:
1882         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1883                 u32 tmp;
1884
1885                 tg3_phy_copper_begin(tp);
1886
1887                 tg3_readphy(tp, MII_BMSR, &tmp);
1888                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1889                     (tmp & BMSR_LSTATUS))
1890                         current_link_up = 1;
1891         }
1892
1893         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1894         if (current_link_up == 1) {
1895                 if (tp->link_config.active_speed == SPEED_100 ||
1896                     tp->link_config.active_speed == SPEED_10)
1897                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1898                 else
1899                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1900         } else
1901                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1902
1903         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1904         if (tp->link_config.active_duplex == DUPLEX_HALF)
1905                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1906
1907         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1908         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1909                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1910                     (current_link_up == 1 &&
1911                      tp->link_config.active_speed == SPEED_10))
1912                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1913         } else {
1914                 if (current_link_up == 1)
1915                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1916         }
1917
1918         /* ??? Without this setting Netgear GA302T PHY does not
1919          * ??? send/receive packets...
1920          */
1921         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1922             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1923                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1924                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1925                 udelay(80);
1926         }
1927
1928         tw32_f(MAC_MODE, tp->mac_mode);
1929         udelay(40);
1930
1931         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1932                 /* Polled via timer. */
1933                 tw32_f(MAC_EVENT, 0);
1934         } else {
1935                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1936         }
1937         udelay(40);
1938
1939         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1940             current_link_up == 1 &&
1941             tp->link_config.active_speed == SPEED_1000 &&
1942             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1943              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1944                 udelay(120);
1945                 tw32_f(MAC_STATUS,
1946                      (MAC_STATUS_SYNC_CHANGED |
1947                       MAC_STATUS_CFG_CHANGED));
1948                 udelay(40);
1949                 tg3_write_mem(tp,
1950                               NIC_SRAM_FIRMWARE_MBOX,
1951                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1952         }
1953
1954         if (current_link_up != netif_carrier_ok(tp->dev)) {
1955                 if (current_link_up)
1956                         netif_carrier_on(tp->dev);
1957                 else
1958                         netif_carrier_off(tp->dev);
1959                 tg3_link_report(tp);
1960         }
1961
1962         return 0;
1963 }
1964
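     /* Software 1000BaseX autonegotiation state, used when the link
      * is driven without the hardware autoneg block (roughly the
      * clause 37 arbitration state machine).
      */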
1965 struct tg3_fiber_aneginfo {
1966         int state;
1967 #define ANEG_STATE_UNKNOWN              0
1968 #define ANEG_STATE_AN_ENABLE            1
1969 #define ANEG_STATE_RESTART_INIT         2
1970 #define ANEG_STATE_RESTART              3
1971 #define ANEG_STATE_DISABLE_LINK_OK      4
1972 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1973 #define ANEG_STATE_ABILITY_DETECT       6
1974 #define ANEG_STATE_ACK_DETECT_INIT      7
1975 #define ANEG_STATE_ACK_DETECT           8
1976 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1977 #define ANEG_STATE_COMPLETE_ACK         10
1978 #define ANEG_STATE_IDLE_DETECT_INIT     11
1979 #define ANEG_STATE_IDLE_DETECT          12
1980 #define ANEG_STATE_LINK_OK              13
1981 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1982 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1983
1984         u32 flags;
1985 #define MR_AN_ENABLE            0x00000001
1986 #define MR_RESTART_AN           0x00000002
1987 #define MR_AN_COMPLETE          0x00000004
1988 #define MR_PAGE_RX              0x00000008
1989 #define MR_NP_LOADED            0x00000010
1990 #define MR_TOGGLE_TX            0x00000020
1991 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1992 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1993 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1994 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1995 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1996 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1997 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1998 #define MR_TOGGLE_RX            0x00002000
1999 #define MR_NP_RX                0x00004000
2000
2001 #define MR_LINK_OK              0x80000000
2002
2003         unsigned long link_time, cur_time;
2004
2005         u32 ability_match_cfg;
2006         int ability_match_count;
2007
2008         char ability_match, idle_match, ack_match;
2009
2010         u32 txconfig, rxconfig;
2011 #define ANEG_CFG_NP             0x00000080
2012 #define ANEG_CFG_ACK            0x00000040
2013 #define ANEG_CFG_RF2            0x00000020
2014 #define ANEG_CFG_RF1            0x00000010
2015 #define ANEG_CFG_PS2            0x00000001
2016 #define ANEG_CFG_PS1            0x00008000
2017 #define ANEG_CFG_HD             0x00004000
2018 #define ANEG_CFG_FD             0x00002000
2019 #define ANEG_CFG_INVAL          0x00001f06
2020
2021 };
2022 #define ANEG_OK         0
2023 #define ANEG_DONE       1
2024 #define ANEG_TIMER_ENAB 2
2025 #define ANEG_FAILED     -1
2026
2027 #define ANEG_STATE_SETTLE_TIME  10000
2028
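     /* Advance the software autoneg state machine by one step.
      * Returns ANEG_OK or ANEG_TIMER_ENAB while negotiation is still
      * in progress, ANEG_DONE when it completes, ANEG_FAILED on
      * error.
      */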
2029 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2030                                    struct tg3_fiber_aneginfo *ap)
2031 {
2032         unsigned long delta;
2033         u32 rx_cfg_reg;
2034         int ret;
2035
2036         if (ap->state == ANEG_STATE_UNKNOWN) {
2037                 ap->rxconfig = 0;
2038                 ap->link_time = 0;
2039                 ap->cur_time = 0;
2040                 ap->ability_match_cfg = 0;
2041                 ap->ability_match_count = 0;
2042                 ap->ability_match = 0;
2043                 ap->idle_match = 0;
2044                 ap->ack_match = 0;
2045         }
2046         ap->cur_time++;
2047
2048         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2049                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2050
2051                 if (rx_cfg_reg != ap->ability_match_cfg) {
2052                         ap->ability_match_cfg = rx_cfg_reg;
2053                         ap->ability_match = 0;
2054                         ap->ability_match_count = 0;
2055                 } else {
2056                         if (++ap->ability_match_count > 1) {
2057                                 ap->ability_match = 1;
2058                                 ap->ability_match_cfg = rx_cfg_reg;
2059                         }
2060                 }
2061                 if (rx_cfg_reg & ANEG_CFG_ACK)
2062                         ap->ack_match = 1;
2063                 else
2064                         ap->ack_match = 0;
2065
2066                 ap->idle_match = 0;
2067         } else {
2068                 ap->idle_match = 1;
2069                 ap->ability_match_cfg = 0;
2070                 ap->ability_match_count = 0;
2071                 ap->ability_match = 0;
2072                 ap->ack_match = 0;
2073
2074                 rx_cfg_reg = 0;
2075         }
2076
2077         ap->rxconfig = rx_cfg_reg;
2078         ret = ANEG_OK;
2079
2080         switch(ap->state) {
2081         case ANEG_STATE_UNKNOWN:
2082                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2083                         ap->state = ANEG_STATE_AN_ENABLE;
2084
2085                 /* fallthru */
2086         case ANEG_STATE_AN_ENABLE:
2087                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2088                 if (ap->flags & MR_AN_ENABLE) {
2089                         ap->link_time = 0;
2090                         ap->cur_time = 0;
2091                         ap->ability_match_cfg = 0;
2092                         ap->ability_match_count = 0;
2093                         ap->ability_match = 0;
2094                         ap->idle_match = 0;
2095                         ap->ack_match = 0;
2096
2097                         ap->state = ANEG_STATE_RESTART_INIT;
2098                 } else {
2099                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2100                 }
2101                 break;
2102
2103         case ANEG_STATE_RESTART_INIT:
2104                 ap->link_time = ap->cur_time;
2105                 ap->flags &= ~(MR_NP_LOADED);
2106                 ap->txconfig = 0;
2107                 tw32(MAC_TX_AUTO_NEG, 0);
2108                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2109                 tw32_f(MAC_MODE, tp->mac_mode);
2110                 udelay(40);
2111
2112                 ret = ANEG_TIMER_ENAB;
2113                 ap->state = ANEG_STATE_RESTART;
2114
2115                 /* fallthru */
2116         case ANEG_STATE_RESTART:
2117                 delta = ap->cur_time - ap->link_time;
2118                 if (delta > ANEG_STATE_SETTLE_TIME) {
2119                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2120                 } else {
2121                         ret = ANEG_TIMER_ENAB;
2122                 }
2123                 break;
2124
2125         case ANEG_STATE_DISABLE_LINK_OK:
2126                 ret = ANEG_DONE;
2127                 break;
2128
2129         case ANEG_STATE_ABILITY_DETECT_INIT:
2130                 ap->flags &= ~(MR_TOGGLE_TX);
2131                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2132                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2133                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2134                 tw32_f(MAC_MODE, tp->mac_mode);
2135                 udelay(40);
2136
2137                 ap->state = ANEG_STATE_ABILITY_DETECT;
2138                 break;
2139
2140         case ANEG_STATE_ABILITY_DETECT:
2141                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2142                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2143                 }
2144                 break;
2145
2146         case ANEG_STATE_ACK_DETECT_INIT:
2147                 ap->txconfig |= ANEG_CFG_ACK;
2148                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2149                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2150                 tw32_f(MAC_MODE, tp->mac_mode);
2151                 udelay(40);
2152
2153                 ap->state = ANEG_STATE_ACK_DETECT;
2154
2155                 /* fallthru */
2156         case ANEG_STATE_ACK_DETECT:
2157                 if (ap->ack_match != 0) {
2158                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2159                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2160                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2161                         } else {
2162                                 ap->state = ANEG_STATE_AN_ENABLE;
2163                         }
2164                 } else if (ap->ability_match != 0 &&
2165                            ap->rxconfig == 0) {
2166                         ap->state = ANEG_STATE_AN_ENABLE;
2167                 }
2168                 break;
2169
2170         case ANEG_STATE_COMPLETE_ACK_INIT:
2171                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2172                         ret = ANEG_FAILED;
2173                         break;
2174                 }
2175                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2176                                MR_LP_ADV_HALF_DUPLEX |
2177                                MR_LP_ADV_SYM_PAUSE |
2178                                MR_LP_ADV_ASYM_PAUSE |
2179                                MR_LP_ADV_REMOTE_FAULT1 |
2180                                MR_LP_ADV_REMOTE_FAULT2 |
2181                                MR_LP_ADV_NEXT_PAGE |
2182                                MR_TOGGLE_RX |
2183                                MR_NP_RX);
2184                 if (ap->rxconfig & ANEG_CFG_FD)
2185                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2186                 if (ap->rxconfig & ANEG_CFG_HD)
2187                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2188                 if (ap->rxconfig & ANEG_CFG_PS1)
2189                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2190                 if (ap->rxconfig & ANEG_CFG_PS2)
2191                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2192                 if (ap->rxconfig & ANEG_CFG_RF1)
2193                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2194                 if (ap->rxconfig & ANEG_CFG_RF2)
2195                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2196                 if (ap->rxconfig & ANEG_CFG_NP)
2197                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2198
2199                 ap->link_time = ap->cur_time;
2200
2201                 ap->flags ^= (MR_TOGGLE_TX);
2202                 if (ap->rxconfig & 0x0008)
2203                         ap->flags |= MR_TOGGLE_RX;
2204                 if (ap->rxconfig & ANEG_CFG_NP)
2205                         ap->flags |= MR_NP_RX;
2206                 ap->flags |= MR_PAGE_RX;
2207
2208                 ap->state = ANEG_STATE_COMPLETE_ACK;
2209                 ret = ANEG_TIMER_ENAB;
2210                 break;
2211
2212         case ANEG_STATE_COMPLETE_ACK:
2213                 if (ap->ability_match != 0 &&
2214                     ap->rxconfig == 0) {
2215                         ap->state = ANEG_STATE_AN_ENABLE;
2216                         break;
2217                 }
2218                 delta = ap->cur_time - ap->link_time;
2219                 if (delta > ANEG_STATE_SETTLE_TIME) {
2220                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2221                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2222                         } else {
2223                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2224                                     !(ap->flags & MR_NP_RX)) {
2225                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2226                                 } else {
2227                                         ret = ANEG_FAILED;
2228                                 }
2229                         }
2230                 }
2231                 break;
2232
2233         case ANEG_STATE_IDLE_DETECT_INIT:
2234                 ap->link_time = ap->cur_time;
2235                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2236                 tw32_f(MAC_MODE, tp->mac_mode);
2237                 udelay(40);
2238
2239                 ap->state = ANEG_STATE_IDLE_DETECT;
2240                 ret = ANEG_TIMER_ENAB;
2241                 break;
2242
2243         case ANEG_STATE_IDLE_DETECT:
2244                 if (ap->ability_match != 0 &&
2245                     ap->rxconfig == 0) {
2246                         ap->state = ANEG_STATE_AN_ENABLE;
2247                         break;
2248                 }
2249                 delta = ap->cur_time - ap->link_time;
2250                 if (delta > ANEG_STATE_SETTLE_TIME) {
2251                         /* XXX another gem from the Broadcom driver :( */
2252                         ap->state = ANEG_STATE_LINK_OK;
2253                 }
2254                 break;
2255
2256         case ANEG_STATE_LINK_OK:
2257                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2258                 ret = ANEG_DONE;
2259                 break;
2260
2261         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2262                 /* ??? unimplemented */
2263                 break;
2264
2265         case ANEG_STATE_NEXT_PAGE_WAIT:
2266                 /* ??? unimplemented */
2267                 break;
2268
2269         default:
2270                 ret = ANEG_FAILED;
2271                 break;
2272         }
2273
2274         return ret;
2275 }
2276
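     /* Run the software autoneg state machine for up to ~195 ms and
      * store the resulting MR_ flags in *flags.  Returns nonzero if
      * autonegotiation completed with a usable link partner.
      */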
2277 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2278 {
2279         int res = 0;
2280         struct tg3_fiber_aneginfo aninfo;
2281         int status = ANEG_FAILED;
2282         unsigned int tick;
2283         u32 tmp;
2284
2285         tw32_f(MAC_TX_AUTO_NEG, 0);
2286
2287         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2288         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2289         udelay(40);
2290
2291         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2292         udelay(40);
2293
2294         memset(&aninfo, 0, sizeof(aninfo));
2295         aninfo.flags |= MR_AN_ENABLE;
2296         aninfo.state = ANEG_STATE_UNKNOWN;
2297         aninfo.cur_time = 0;
2298         tick = 0;
2299         while (++tick < 195000) {
2300                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2301                 if (status == ANEG_DONE || status == ANEG_FAILED)
2302                         break;
2303
2304                 udelay(1);
2305         }
2306
2307         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2308         tw32_f(MAC_MODE, tp->mac_mode);
2309         udelay(40);
2310
2311         *flags = aninfo.flags;
2312
2313         if (status == ANEG_DONE &&
2314             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2315                              MR_LP_ADV_FULL_DUPLEX)))
2316                 res = 1;
2317
2318         return res;
2319 }
2320
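     /* One-time init for the BCM8002 SerDes PHY: software reset, PLL
      * lock range, auto-lock/comdet and POR sequencing via
      * vendor-specific registers, then deselect the channel register
      * so the PHY ID can be read later.
      */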
2321 static void tg3_init_bcm8002(struct tg3 *tp)
2322 {
2323         u32 mac_status = tr32(MAC_STATUS);
2324         int i;
2325
2326         /* Reset when initializing for the first time or when we have a link. */
2327         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2328             !(mac_status & MAC_STATUS_PCS_SYNCED))
2329                 return;
2330
2331         /* Set PLL lock range. */
2332         tg3_writephy(tp, 0x16, 0x8007);
2333
2334         /* SW reset */
2335         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2336
2337         /* Wait for reset to complete. */
2338         /* XXX schedule_timeout() ... */
2339         for (i = 0; i < 500; i++)
2340                 udelay(10);
2341
2342         /* Config mode; select PMA/Ch 1 regs. */
2343         tg3_writephy(tp, 0x10, 0x8411);
2344
2345         /* Enable auto-lock and comdet, select txclk for tx. */
2346         tg3_writephy(tp, 0x11, 0x0a10);
2347
2348         tg3_writephy(tp, 0x18, 0x00a0);
2349         tg3_writephy(tp, 0x16, 0x41ff);
2350
2351         /* Assert and deassert POR. */
2352         tg3_writephy(tp, 0x13, 0x0400);
2353         udelay(40);
2354         tg3_writephy(tp, 0x13, 0x0000);
2355
2356         tg3_writephy(tp, 0x11, 0x0a50);
2357         udelay(40);
2358         tg3_writephy(tp, 0x11, 0x0a10);
2359
2360         /* Wait for signal to stabilize */
2361         /* XXX schedule_timeout() ... */
2362         for (i = 0; i < 15000; i++)
2363                 udelay(10);
2364
2365         /* Deselect the channel register so we can read the PHYID
2366          * later.
2367          */
2368         tg3_writephy(tp, 0x10, 0x8011);
2369 }
2370
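     /* Fiber link setup using the on-chip SG_DIG hardware autoneg
      * block.  Returns 1 if the link comes up, either through
      * negotiation or through parallel detection (PCS synced while
      * not receiving config code words), 0 otherwise.
      */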
2371 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2372 {
2373         u32 sg_dig_ctrl, sg_dig_status;
2374         u32 serdes_cfg, expected_sg_dig_ctrl;
2375         int workaround, port_a;
2376         int current_link_up;
2377
2378         serdes_cfg = 0;
2379         expected_sg_dig_ctrl = 0;
2380         workaround = 0;
2381         port_a = 1;
2382         current_link_up = 0;
2383
2384         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2385             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2386                 workaround = 1;
2387                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2388                         port_a = 0;
2389
2390                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2391                 /* preserve bits 20-23 for voltage regulator */
2392                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2393         }
2394
2395         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2396
2397         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2398                 if (sg_dig_ctrl & (1 << 31)) {
2399                         if (workaround) {
2400                                 u32 val = serdes_cfg;
2401
2402                                 if (port_a)
2403                                         val |= 0xc010000;
2404                                 else
2405                                         val |= 0x4010000;
2406                                 tw32_f(MAC_SERDES_CFG, val);
2407                         }
2408                         tw32_f(SG_DIG_CTRL, 0x01388400);
2409                 }
2410                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2411                         tg3_setup_flow_control(tp, 0, 0);
2412                         current_link_up = 1;
2413                 }
2414                 goto out;
2415         }
2416
2417         /* Want auto-negotiation.  */
2418         expected_sg_dig_ctrl = 0x81388400;
2419
2420         /* Pause capability */
2421         expected_sg_dig_ctrl |= (1 << 11);
2422
2423         /* Asymmetric pause */
2424         expected_sg_dig_ctrl |= (1 << 12);
2425
2426         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2427                 if (workaround)
2428                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2429                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2430                 udelay(5);
2431                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2432
2433                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2434         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2435                                  MAC_STATUS_SIGNAL_DET)) {
2436                 int i;
2437
2438                 /* Give time to negotiate (~200ms) */
2439                 for (i = 0; i < 40000; i++) {
2440                         sg_dig_status = tr32(SG_DIG_STATUS);
2441                         if (sg_dig_status & (0x3))
2442                                 break;
2443                         udelay(5);
2444                 }
2445                 mac_status = tr32(MAC_STATUS);
2446
2447                 if ((sg_dig_status & (1 << 1)) &&
2448                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2449                         u32 local_adv, remote_adv;
2450
2451                         local_adv = ADVERTISE_PAUSE_CAP;
2452                         remote_adv = 0;
2453                         if (sg_dig_status & (1 << 19))
2454                                 remote_adv |= LPA_PAUSE_CAP;
2455                         if (sg_dig_status & (1 << 20))
2456                                 remote_adv |= LPA_PAUSE_ASYM;
2457
2458                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2459                         current_link_up = 1;
2460                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2461                 } else if (!(sg_dig_status & (1 << 1))) {
2462                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2463                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2464                         else {
2465                                 if (workaround) {
2466                                         u32 val = serdes_cfg;
2467
2468                                         if (port_a)
2469                                                 val |= 0xc010000;
2470                                         else
2471                                                 val |= 0x4010000;
2472
2473                                         tw32_f(MAC_SERDES_CFG, val);
2474                                 }
2475
2476                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2477                                 udelay(40);
2478
2479                                 /* Link parallel detection - link is up
2480                                  * only if we have PCS_SYNC and not
2481                                  * receiving config code words */
2482                                 mac_status = tr32(MAC_STATUS);
2483                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2484                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2485                                         tg3_setup_flow_control(tp, 0, 0);
2486                                         current_link_up = 1;
2487                                 }
2488                         }
2489                 }
2490         }
2491
2492 out:
2493         return current_link_up;
2494 }
2495
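     /* Fiber link setup without the SG_DIG block: run the software
      * autoneg state machine, or simply force a 1000FD link when
      * autonegotiation is disabled.  Returns 1 if the link is up.
      */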
2496 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2497 {
2498         int current_link_up = 0;
2499
2500         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2501                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2502                 goto out;
2503         }
2504
2505         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2506                 u32 flags;
2507                 int i;
2508
2509                 if (fiber_autoneg(tp, &flags)) {
2510                         u32 local_adv, remote_adv;
2511
2512                         local_adv = ADVERTISE_PAUSE_CAP;
2513                         remote_adv = 0;
2514                         if (flags & MR_LP_ADV_SYM_PAUSE)
2515                                 remote_adv |= LPA_PAUSE_CAP;
2516                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2517                                 remote_adv |= LPA_PAUSE_ASYM;
2518
2519                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2520
2521                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2522                         current_link_up = 1;
2523                 }
2524                 for (i = 0; i < 30; i++) {
2525                         udelay(20);
2526                         tw32_f(MAC_STATUS,
2527                                (MAC_STATUS_SYNC_CHANGED |
2528                                 MAC_STATUS_CFG_CHANGED));
2529                         udelay(40);
2530                         if ((tr32(MAC_STATUS) &
2531                              (MAC_STATUS_SYNC_CHANGED |
2532                               MAC_STATUS_CFG_CHANGED)) == 0)
2533                                 break;
2534                 }
2535
2536                 mac_status = tr32(MAC_STATUS);
2537                 if (current_link_up == 0 &&
2538                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2539                     !(mac_status & MAC_STATUS_RCVD_CFG))
2540                         current_link_up = 1;
2541         } else {
2542                 /* Forcing 1000FD link up. */
2543                 current_link_up = 1;
2544                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2545
2546                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2547                 udelay(40);
2548         }
2549
2550 out:
2551         return current_link_up;
2552 }
2553
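     /* Top-level link setup for TBI/fiber boards: select hardware or
      * software autonegotiation, then update MAC mode, the link LEDs
      * and the carrier state, reporting any link or pause changes.
      */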
2554 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2555 {
2556         u32 orig_pause_cfg;
2557         u16 orig_active_speed;
2558         u8 orig_active_duplex;
2559         u32 mac_status;
2560         int current_link_up;
2561         int i;
2562
2563         orig_pause_cfg =
2564                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2565                                   TG3_FLAG_TX_PAUSE));
2566         orig_active_speed = tp->link_config.active_speed;
2567         orig_active_duplex = tp->link_config.active_duplex;
2568
2569         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2570             netif_carrier_ok(tp->dev) &&
2571             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2572                 mac_status = tr32(MAC_STATUS);
2573                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2574                                MAC_STATUS_SIGNAL_DET |
2575                                MAC_STATUS_CFG_CHANGED |
2576                                MAC_STATUS_RCVD_CFG);
2577                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2578                                    MAC_STATUS_SIGNAL_DET)) {
2579                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2580                                             MAC_STATUS_CFG_CHANGED));
2581                         return 0;
2582                 }
2583         }
2584
2585         tw32_f(MAC_TX_AUTO_NEG, 0);
2586
2587         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2588         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2589         tw32_f(MAC_MODE, tp->mac_mode);
2590         udelay(40);
2591
2592         if (tp->phy_id == PHY_ID_BCM8002)
2593                 tg3_init_bcm8002(tp);
2594
2595         /* Enable link change event even when serdes polling.  */
2596         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2597         udelay(40);
2598
2599         current_link_up = 0;
2600         mac_status = tr32(MAC_STATUS);
2601
2602         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2603                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2604         else
2605                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2606
2607         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2608         tw32_f(MAC_MODE, tp->mac_mode);
2609         udelay(40);
2610
2611         tp->hw_status->status =
2612                 (SD_STATUS_UPDATED |
2613                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2614
2615         for (i = 0; i < 100; i++) {
2616                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2617                                     MAC_STATUS_CFG_CHANGED));
2618                 udelay(5);
2619                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2620                                          MAC_STATUS_CFG_CHANGED)) == 0)
2621                         break;
2622         }
2623
2624         mac_status = tr32(MAC_STATUS);
2625         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2626                 current_link_up = 0;
2627                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2628                         tw32_f(MAC_MODE, (tp->mac_mode |
2629                                           MAC_MODE_SEND_CONFIGS));
2630                         udelay(1);
2631                         tw32_f(MAC_MODE, tp->mac_mode);
2632                 }
2633         }
2634
2635         if (current_link_up == 1) {
2636                 tp->link_config.active_speed = SPEED_1000;
2637                 tp->link_config.active_duplex = DUPLEX_FULL;
2638                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2639                                     LED_CTRL_LNKLED_OVERRIDE |
2640                                     LED_CTRL_1000MBPS_ON));
2641         } else {
2642                 tp->link_config.active_speed = SPEED_INVALID;
2643                 tp->link_config.active_duplex = DUPLEX_INVALID;
2644                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2645                                     LED_CTRL_LNKLED_OVERRIDE |
2646                                     LED_CTRL_TRAFFIC_OVERRIDE));
2647         }
2648
2649         if (current_link_up != netif_carrier_ok(tp->dev)) {
2650                 if (current_link_up)
2651                         netif_carrier_on(tp->dev);
2652                 else
2653                         netif_carrier_off(tp->dev);
2654                 tg3_link_report(tp);
2655         } else {
2656                 u32 now_pause_cfg =
2657                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2658                                          TG3_FLAG_TX_PAUSE);
2659                 if (orig_pause_cfg != now_pause_cfg ||
2660                     orig_active_speed != tp->link_config.active_speed ||
2661                     orig_active_duplex != tp->link_config.active_duplex)
2662                         tg3_link_report(tp);
2663         }
2664
2665         return 0;
2666 }
2667
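     /* Link setup for SerDes devices controlled through MII-style
      * registers: program the 1000BaseX advertisement or force the
      * requested mode, handle parallel detection, and resolve flow
      * control from the negotiated ability bits.
      */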
2668 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2669 {
2670         int current_link_up, err = 0;
2671         u32 bmsr, bmcr;
2672         u16 current_speed;
2673         u8 current_duplex;
2674
2675         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2676         tw32_f(MAC_MODE, tp->mac_mode);
2677         udelay(40);
2678
2679         tw32(MAC_EVENT, 0);
2680
2681         tw32_f(MAC_STATUS,
2682              (MAC_STATUS_SYNC_CHANGED |
2683               MAC_STATUS_CFG_CHANGED |
2684               MAC_STATUS_MI_COMPLETION |
2685               MAC_STATUS_LNKSTATE_CHANGED));
2686         udelay(40);
2687
2688         if (force_reset)
2689                 tg3_phy_reset(tp);
2690
2691         current_link_up = 0;
2692         current_speed = SPEED_INVALID;
2693         current_duplex = DUPLEX_INVALID;
2694
2695         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2696         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2697         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2698                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2699                         bmsr |= BMSR_LSTATUS;
2700                 else
2701                         bmsr &= ~BMSR_LSTATUS;
2702         }
2703
2704         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2705
2706         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2707             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2708                 /* do nothing, just check for link up at the end */
2709         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2710                 u32 adv, new_adv;
2711
2712                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2713                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2714                                   ADVERTISE_1000XPAUSE |
2715                                   ADVERTISE_1000XPSE_ASYM |
2716                                   ADVERTISE_SLCT);
2717
2718                 /* Always advertise symmetric PAUSE just like copper */
2719                 new_adv |= ADVERTISE_1000XPAUSE;
2720
2721                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2722                         new_adv |= ADVERTISE_1000XHALF;
2723                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2724                         new_adv |= ADVERTISE_1000XFULL;
2725
2726                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2727                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2728                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2729                         tg3_writephy(tp, MII_BMCR, bmcr);
2730
2731                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2732                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2733                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2734
2735                         return err;
2736                 }
2737         } else {
2738                 u32 new_bmcr;
2739
2740                 bmcr &= ~BMCR_SPEED1000;
2741                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2742
2743                 if (tp->link_config.duplex == DUPLEX_FULL)
2744                         new_bmcr |= BMCR_FULLDPLX;
2745
2746                 if (new_bmcr != bmcr) {
2747                         /* BMCR_SPEED1000 is a reserved bit that needs
2748                          * to be set on write.
2749                          */
2750                         new_bmcr |= BMCR_SPEED1000;
2751
2752                         /* Force a linkdown */
2753                         if (netif_carrier_ok(tp->dev)) {
2754                                 u32 adv;
2755
2756                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2757                                 adv &= ~(ADVERTISE_1000XFULL |
2758                                          ADVERTISE_1000XHALF |
2759                                          ADVERTISE_SLCT);
2760                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2761                                 tg3_writephy(tp, MII_BMCR, bmcr |
2762                                                            BMCR_ANRESTART |
2763                                                            BMCR_ANENABLE);
2764                                 udelay(10);
2765                                 netif_carrier_off(tp->dev);
2766                         }
2767                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2768                         bmcr = new_bmcr;
2769                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2770                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2771                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2772                             ASIC_REV_5714) {
2773                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2774                                         bmsr |= BMSR_LSTATUS;
2775                                 else
2776                                         bmsr &= ~BMSR_LSTATUS;
2777                         }
2778                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2779                 }
2780         }
2781
2782         if (bmsr & BMSR_LSTATUS) {
2783                 current_speed = SPEED_1000;
2784                 current_link_up = 1;
2785                 if (bmcr & BMCR_FULLDPLX)
2786                         current_duplex = DUPLEX_FULL;
2787                 else
2788                         current_duplex = DUPLEX_HALF;
2789
2790                 if (bmcr & BMCR_ANENABLE) {
2791                         u32 local_adv, remote_adv, common;
2792
2793                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2794                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2795                         common = local_adv & remote_adv;
2796                         if (common & (ADVERTISE_1000XHALF |
2797                                       ADVERTISE_1000XFULL)) {
2798                                 if (common & ADVERTISE_1000XFULL)
2799                                         current_duplex = DUPLEX_FULL;
2800                                 else
2801                                         current_duplex = DUPLEX_HALF;
2802
2803                                 tg3_setup_flow_control(tp, local_adv,
2804                                                        remote_adv);
2805                         }
2806                         else
2807                                 current_link_up = 0;
2808                 }
2809         }
2810
2811         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2812         if (tp->link_config.active_duplex == DUPLEX_HALF)
2813                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2814
2815         tw32_f(MAC_MODE, tp->mac_mode);
2816         udelay(40);
2817
2818         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2819
2820         tp->link_config.active_speed = current_speed;
2821         tp->link_config.active_duplex = current_duplex;
2822
2823         if (current_link_up != netif_carrier_ok(tp->dev)) {
2824                 if (current_link_up)
2825                         netif_carrier_on(tp->dev);
2826                 else {
2827                         netif_carrier_off(tp->dev);
2828                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2829                 }
2830                 tg3_link_report(tp);
2831         }
2832         return err;
2833 }
2834
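/* Descriptive note on the parallel-detection logic below (an
 * interpretation of the code, not taken from chip documentation): when
 * the fiber MII PHY reports signal detect (bit 0x10 of shadow register
 * 0x1f, read back through register 0x1c) but is not receiving autoneg
 * config code words (bit 0x20 of the expansion interrupt status
 * register), the link partner is presumably not autonegotiating, so we
 * force 1000/full and set TG3_FLG2_PARALLEL_DETECT.  If config code
 * words appear later, autoneg is turned back on and the flag cleared.
 */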
2835 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2836 {
2837         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2838                 /* Give autoneg time to complete. */
2839                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2840                 return;
2841         }
2842         if (!netif_carrier_ok(tp->dev) &&
2843             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2844                 u32 bmcr;
2845
2846                 tg3_readphy(tp, MII_BMCR, &bmcr);
2847                 if (bmcr & BMCR_ANENABLE) {
2848                         u32 phy1, phy2;
2849
2850                         /* Select shadow register 0x1f */
2851                         tg3_writephy(tp, 0x1c, 0x7c00);
2852                         tg3_readphy(tp, 0x1c, &phy1);
2853
2854                         /* Select expansion interrupt status register */
2855                         tg3_writephy(tp, 0x17, 0x0f01);
2856                         tg3_readphy(tp, 0x15, &phy2);
2857                         tg3_readphy(tp, 0x15, &phy2);
2858
2859                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2860                                 /* We have signal detect but are not receiving
2861                                  * config code words, so the link is up by
2862                                  * parallel detection.
2863                                  */
2864
2865                                 bmcr &= ~BMCR_ANENABLE;
2866                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2867                                 tg3_writephy(tp, MII_BMCR, bmcr);
2868                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2869                         }
2870                 }
2871         }
2872         else if (netif_carrier_ok(tp->dev) &&
2873                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2874                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2875                 u32 phy2;
2876
2877                 /* Select expansion interrupt status register */
2878                 tg3_writephy(tp, 0x17, 0x0f01);
2879                 tg3_readphy(tp, 0x15, &phy2);
2880                 if (phy2 & 0x20) {
2881                         u32 bmcr;
2882
2883                         /* Config code words received, turn on autoneg. */
2884                         tg3_readphy(tp, MII_BMCR, &bmcr);
2885                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2886
2887                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2888
2889                 }
2890         }
2891 }
2892
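/* tg3_setup_phy() is the common link-setup entry point: it dispatches
 * to the copper, fiber (SerDes) or fiber-MII variant above based on the
 * tg3_flags2 PHY type bits, then adjusts MAC_TX_LENGTHS (a longer slot
 * time for half-duplex gigabit) and, on pre-5705 parts, the statistics
 * coalescing tick according to the resulting carrier state.
 */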
2893 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2894 {
2895         int err;
2896
2897         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2898                 err = tg3_setup_fiber_phy(tp, force_reset);
2899         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2900                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2901         } else {
2902                 err = tg3_setup_copper_phy(tp, force_reset);
2903         }
2904
2905         if (tp->link_config.active_speed == SPEED_1000 &&
2906             tp->link_config.active_duplex == DUPLEX_HALF)
2907                 tw32(MAC_TX_LENGTHS,
2908                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2909                       (6 << TX_LENGTHS_IPG_SHIFT) |
2910                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2911         else
2912                 tw32(MAC_TX_LENGTHS,
2913                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2914                       (6 << TX_LENGTHS_IPG_SHIFT) |
2915                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2916
2917         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2918                 if (netif_carrier_ok(tp->dev)) {
2919                         tw32(HOSTCC_STAT_COAL_TICKS,
2920                              tp->coal.stats_block_coalesce_usecs);
2921                 } else {
2922                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2923                 }
2924         }
2925
2926         return err;
2927 }
2928
2929 /* Tigon3 never reports partial packet sends.  So we do not
2930  * need special logic to handle SKBs that have not had all
2931  * of their frags sent yet, like SunGEM does.
2932  */
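/* Rough shape of the reclaim loop below: hw_idx is the chip's consumer
 * index from the status block, sw_idx is the driver's.  For each
 * completed skb we unmap the linear part with pci_unmap_single(), then
 * one page per fragment with pci_unmap_page(), stepping the index with
 * NEXT_TX() so it wraps at the ring size.  The skb is freed only after
 * all of its descriptors have been walked, and tp->tx_cons is published
 * once the loop finishes.
 */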
2933 static void tg3_tx(struct tg3 *tp)
2934 {
2935         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2936         u32 sw_idx = tp->tx_cons;
2937
2938         while (sw_idx != hw_idx) {
2939                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2940                 struct sk_buff *skb = ri->skb;
2941                 int i;
2942
2943                 if (unlikely(skb == NULL))
2944                         BUG();
2945
2946                 pci_unmap_single(tp->pdev,
2947                                  pci_unmap_addr(ri, mapping),
2948                                  skb_headlen(skb),
2949                                  PCI_DMA_TODEVICE);
2950
2951                 ri->skb = NULL;
2952
2953                 sw_idx = NEXT_TX(sw_idx);
2954
2955                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2956                         if (unlikely(sw_idx == hw_idx))
2957                                 BUG();
2958
2959                         ri = &tp->tx_buffers[sw_idx];
2960                         if (unlikely(ri->skb != NULL))
2961                                 BUG();
2962
2963                         pci_unmap_page(tp->pdev,
2964                                        pci_unmap_addr(ri, mapping),
2965                                        skb_shinfo(skb)->frags[i].size,
2966                                        PCI_DMA_TODEVICE);
2967
2968                         sw_idx = NEXT_TX(sw_idx);
2969                 }
2970
2971                 dev_kfree_skb(skb);
2972         }
2973
2974         tp->tx_cons = sw_idx;
2975
2976         if (unlikely(netif_queue_stopped(tp->dev))) {
2977                 spin_lock(&tp->tx_lock);
2978                 if (netif_queue_stopped(tp->dev) &&
2979                     (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2980                         netif_wake_queue(tp->dev);
2981                 spin_unlock(&tp->tx_lock);
2982         }
2983 }
2984
2985 /* Returns size of skb allocated or < 0 on error.
2986  *
2987  * We only need to fill in the address because the other members
2988  * of the RX descriptor are invariant, see tg3_init_rings.
2989  *
2990  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2991  * posting buffers we only dirty the first cache line of the RX
2992  * descriptor (containing the address).  Whereas for the RX status
2993  * buffers the cpu only reads the last cacheline of the RX descriptor
2994  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2995  */
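/* Rough contract, restating the code below: opaque_key selects the
 * standard or jumbo producer ring, src_idx (if >= 0) names the slot
 * whose skb is being handed up the stack (its pointer is cleared on
 * success), and the return value is the buffer size that was DMA
 * mapped.  Nothing in the ring or map arrays is touched until
 * dev_alloc_skb() succeeds, which is what lets tg3_rx() fall back to
 * tg3_recycle_rx() when allocation fails.
 */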
2996 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2997                             int src_idx, u32 dest_idx_unmasked)
2998 {
2999         struct tg3_rx_buffer_desc *desc;
3000         struct ring_info *map, *src_map;
3001         struct sk_buff *skb;
3002         dma_addr_t mapping;
3003         int skb_size, dest_idx;
3004
3005         src_map = NULL;
3006         switch (opaque_key) {
3007         case RXD_OPAQUE_RING_STD:
3008                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3009                 desc = &tp->rx_std[dest_idx];
3010                 map = &tp->rx_std_buffers[dest_idx];
3011                 if (src_idx >= 0)
3012                         src_map = &tp->rx_std_buffers[src_idx];
3013                 skb_size = tp->rx_pkt_buf_sz;
3014                 break;
3015
3016         case RXD_OPAQUE_RING_JUMBO:
3017                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3018                 desc = &tp->rx_jumbo[dest_idx];
3019                 map = &tp->rx_jumbo_buffers[dest_idx];
3020                 if (src_idx >= 0)
3021                         src_map = &tp->rx_jumbo_buffers[src_idx];
3022                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3023                 break;
3024
3025         default:
3026                 return -EINVAL;
3027         }
3028
3029         /* Do not overwrite any of the map or rp information
3030          * until we are sure we can commit to a new buffer.
3031          *
3032          * Callers depend upon this behavior and assume that
3033          * we leave everything unchanged if we fail.
3034          */
3035         skb = dev_alloc_skb(skb_size);
3036         if (skb == NULL)
3037                 return -ENOMEM;
3038
3039         skb->dev = tp->dev;
3040         skb_reserve(skb, tp->rx_offset);
3041
3042         mapping = pci_map_single(tp->pdev, skb->data,
3043                                  skb_size - tp->rx_offset,
3044                                  PCI_DMA_FROMDEVICE);
3045
3046         map->skb = skb;
3047         pci_unmap_addr_set(map, mapping, mapping);
3048
3049         if (src_map != NULL)
3050                 src_map->skb = NULL;
3051
3052         desc->addr_hi = ((u64)mapping >> 32);
3053         desc->addr_lo = ((u64)mapping & 0xffffffff);
3054
3055         return skb_size;
3056 }
3057
3058 /* We only need to move over in the address because the other
3059  * members of the RX descriptor are invariant.  See notes above
3060  * tg3_alloc_rx_skb for full details.
3061  */
3062 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3063                            int src_idx, u32 dest_idx_unmasked)
3064 {
3065         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3066         struct ring_info *src_map, *dest_map;
3067         int dest_idx;
3068
3069         switch (opaque_key) {
3070         case RXD_OPAQUE_RING_STD:
3071                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3072                 dest_desc = &tp->rx_std[dest_idx];
3073                 dest_map = &tp->rx_std_buffers[dest_idx];
3074                 src_desc = &tp->rx_std[src_idx];
3075                 src_map = &tp->rx_std_buffers[src_idx];
3076                 break;
3077
3078         case RXD_OPAQUE_RING_JUMBO:
3079                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3080                 dest_desc = &tp->rx_jumbo[dest_idx];
3081                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3082                 src_desc = &tp->rx_jumbo[src_idx];
3083                 src_map = &tp->rx_jumbo_buffers[src_idx];
3084                 break;
3085
3086         default:
3087                 return;
3088         }
3089
3090         dest_map->skb = src_map->skb;
3091         pci_unmap_addr_set(dest_map, mapping,
3092                            pci_unmap_addr(src_map, mapping));
3093         dest_desc->addr_hi = src_desc->addr_hi;
3094         dest_desc->addr_lo = src_desc->addr_lo;
3095
3096         src_map->skb = NULL;
3097 }
3098
3099 #if TG3_VLAN_TAG_USED
3100 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3101 {
3102         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3103 }
3104 #endif
3105
3106 /* The RX ring scheme is composed of multiple rings which post fresh
3107  * buffers to the chip, and one special ring the chip uses to report
3108  * status back to the host.
3109  *
3110  * The special ring reports the status of received packets to the
3111  * host.  The chip does not write into the original descriptor the
3112  * RX buffer was obtained from.  The chip simply takes the original
3113  * descriptor as provided by the host, updates the status and length
3114  * field, then writes this into the next status ring entry.
3115  *
3116  * Each ring the host uses to post buffers to the chip is described
3117  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3118  * it is first placed into on-chip RAM.  Once the packet's length
3119  * is known, the chip walks down the TG3_BDINFO entries to select a ring:
3120  * each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
3121  * whose MAXLEN covers the new packet's length is chosen.
3122  *
3123  * The "separate ring for rx status" scheme may sound queer, but it makes
3124  * sense from a cache coherency perspective.  If only the host writes
3125  * to the buffer post rings, and only the chip writes to the rx status
3126  * rings, then cache lines never move beyond shared-modified state.
3127  * If both the host and chip were to write into the same ring, cache line
3128  * eviction could occur since both entities want it in an exclusive state.
3129  */
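/* Descriptive summary of tg3_rx() below: each status ring entry carries
 * back the opaque cookie the host placed in the posting ring (ring type
 * plus slot index), which is how the driver finds the matching skb and
 * DMA mapping again.  Small packets (and all packets in the 5701 PCI-X
 * case) are copied into a fresh skb and the original buffer is recycled
 * in place; larger packets are unmapped and handed up directly, with a
 * new buffer allocated into their slot.
 */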
3130 static int tg3_rx(struct tg3 *tp, int budget)
3131 {
3132         u32 work_mask;
3133         u32 sw_idx = tp->rx_rcb_ptr;
3134         u16 hw_idx;
3135         int received;
3136
3137         hw_idx = tp->hw_status->idx[0].rx_producer;
3138         /*
3139          * We need to order the read of hw_idx and the read of
3140          * the opaque cookie.
3141          */
3142         rmb();
3143         work_mask = 0;
3144         received = 0;
3145         while (sw_idx != hw_idx && budget > 0) {
3146                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3147                 unsigned int len;
3148                 struct sk_buff *skb;
3149                 dma_addr_t dma_addr;
3150                 u32 opaque_key, desc_idx, *post_ptr;
3151
3152                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3153                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3154                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3155                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3156                                                   mapping);
3157                         skb = tp->rx_std_buffers[desc_idx].skb;
3158                         post_ptr = &tp->rx_std_ptr;
3159                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3160                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3161                                                   mapping);
3162                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3163                         post_ptr = &tp->rx_jumbo_ptr;
3164                 }
3165                 else {
3166                         goto next_pkt_nopost;
3167                 }
3168
3169                 work_mask |= opaque_key;
3170
3171                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3172                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3173                 drop_it:
3174                         tg3_recycle_rx(tp, opaque_key,
3175                                        desc_idx, *post_ptr);
3176                 drop_it_no_recycle:
3177                         /* Other statistics are kept track of by the card. */
3178                         tp->net_stats.rx_dropped++;
3179                         goto next_pkt;
3180                 }
3181
3182                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3183
3184                 /* rx_offset != 2 iff this is a 5701 card running
3185                  * in PCI-X mode [see tg3_get_invariants()].
3186                  */
3187                 if (len > RX_COPY_THRESHOLD &&
3188                     tp->rx_offset == 2) {
3189                         int skb_size;
3190
3191                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3192                                                     desc_idx, *post_ptr);
3193                         if (skb_size < 0)
3194                                 goto drop_it;
3195
3196                         pci_unmap_single(tp->pdev, dma_addr,
3197                                          skb_size - tp->rx_offset,
3198                                          PCI_DMA_FROMDEVICE);
3199
3200                         skb_put(skb, len);
3201                 } else {
3202                         struct sk_buff *copy_skb;
3203
3204                         tg3_recycle_rx(tp, opaque_key,
3205                                        desc_idx, *post_ptr);
3206
3207                         copy_skb = dev_alloc_skb(len + 2);
3208                         if (copy_skb == NULL)
3209                                 goto drop_it_no_recycle;
3210
3211                         copy_skb->dev = tp->dev;
3212                         skb_reserve(copy_skb, 2);
3213                         skb_put(copy_skb, len);
3214                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3215                         memcpy(copy_skb->data, skb->data, len);
3216                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3217
3218                         /* We'll reuse the original ring buffer. */
3219                         skb = copy_skb;
3220                 }
3221
3222                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3223                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3224                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3225                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3226                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3227                 else
3228                         skb->ip_summed = CHECKSUM_NONE;
3229
3230                 skb->protocol = eth_type_trans(skb, tp->dev);
3231 #if TG3_VLAN_TAG_USED
3232                 if (tp->vlgrp != NULL &&
3233                     desc->type_flags & RXD_FLAG_VLAN) {
3234                         tg3_vlan_rx(tp, skb,
3235                                     desc->err_vlan & RXD_VLAN_MASK);
3236                 } else
3237 #endif
3238                         netif_receive_skb(skb);
3239
3240                 tp->dev->last_rx = jiffies;
3241                 received++;
3242                 budget--;
3243
3244 next_pkt:
3245                 (*post_ptr)++;
3246 next_pkt_nopost:
3247                 sw_idx++;
3248                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3249
3250                 /* Refresh hw_idx to see if there is new work */
3251                 if (sw_idx == hw_idx) {
3252                         hw_idx = tp->hw_status->idx[0].rx_producer;
3253                         rmb();
3254                 }
3255         }
3256
3257         /* ACK the status ring. */
3258         tp->rx_rcb_ptr = sw_idx;
3259         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3260
3261         /* Refill RX ring(s). */
3262         if (work_mask & RXD_OPAQUE_RING_STD) {
3263                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3264                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3265                              sw_idx);
3266         }
3267         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3268                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3269                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3270                              sw_idx);
3271         }
3272         mmiowb();
3273
3274         return received;
3275 }
3276
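/* tg3_poll() follows the ->poll() contract of this kernel generation:
 * both *budget and netdev->quota are decremented by the RX work done,
 * and the return value is 0 when all work is finished (after
 * netif_rx_complete() and tg3_restart_ints()) or 1 when the NIC should
 * be polled again.
 */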
3277 static int tg3_poll(struct net_device *netdev, int *budget)
3278 {
3279         struct tg3 *tp = netdev_priv(netdev);
3280         struct tg3_hw_status *sblk = tp->hw_status;
3281         int done;
3282
3283         /* handle link change and other phy events */
3284         if (!(tp->tg3_flags &
3285               (TG3_FLAG_USE_LINKCHG_REG |
3286                TG3_FLAG_POLL_SERDES))) {
3287                 if (sblk->status & SD_STATUS_LINK_CHG) {
3288                         sblk->status = SD_STATUS_UPDATED |
3289                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3290                         spin_lock(&tp->lock);
3291                         tg3_setup_phy(tp, 0);
3292                         spin_unlock(&tp->lock);
3293                 }
3294         }
3295
3296         /* run TX completion thread */
3297         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3298                 tg3_tx(tp);
3299         }
3300
3301         /* run RX thread, within the bounds set by NAPI.
3302          * All RX "locking" is done by ensuring outside
3303          * code synchronizes with dev->poll()
3304          */
3305         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3306                 int orig_budget = *budget;
3307                 int work_done;
3308
3309                 if (orig_budget > netdev->quota)
3310                         orig_budget = netdev->quota;
3311
3312                 work_done = tg3_rx(tp, orig_budget);
3313
3314                 *budget -= work_done;
3315                 netdev->quota -= work_done;
3316         }
3317
3318         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3319                 tp->last_tag = sblk->status_tag;
3320                 rmb();
3321         } else
3322                 sblk->status &= ~SD_STATUS_UPDATED;
3323
3324         /* if no more work, tell net stack and NIC we're done */
3325         done = !tg3_has_work(tp);
3326         if (done) {
3327                 netif_rx_complete(netdev);
3328                 tg3_restart_ints(tp);
3329         }
3330
3331         return (done ? 0 : 1);
3332 }
3333
3334 static void tg3_irq_quiesce(struct tg3 *tp)
3335 {
3336         BUG_ON(tp->irq_sync);
3337
3338         tp->irq_sync = 1;
3339         smp_mb();
3340
3341         synchronize_irq(tp->pdev->irq);
3342 }
3343
3344 static inline int tg3_irq_sync(struct tg3 *tp)
3345 {
3346         return tp->irq_sync;
3347 }
3348
3349 /* Fully shut down all tg3 driver activity elsewhere in the system.
3350  * If irq_sync is non-zero, the IRQ handler is quiesced as well.
3351  * Most of the time this is not necessary, except when
3352  * shutting down the device.
3353  */
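/* A minimal sketch of the lock ordering used here: tp->lock is taken
 * first, then tp->tx_lock, and tg3_full_unlock() releases them in the
 * reverse order.  The _bh variant matters because the NAPI poll runs in
 * softirq context and takes the same locks, so bottom halves must be
 * kept off this CPU while they are held from process context.
 */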
3354 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3355 {
3356         if (irq_sync)
3357                 tg3_irq_quiesce(tp);
3358         spin_lock_bh(&tp->lock);
3359         spin_lock(&tp->tx_lock);
3360 }
3361
3362 static inline void tg3_full_unlock(struct tg3 *tp)
3363 {
3364         spin_unlock(&tp->tx_lock);
3365         spin_unlock_bh(&tp->lock);
3366 }
3367
3368 /* MSI ISR - No need to check for interrupt sharing and no need to
3369  * flush status block and interrupt mailbox. PCI ordering rules
3370  * guarantee that MSI will arrive after the status block.
3371  */
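/* Interrupt mailbox convention used by all of the handlers below:
 * writing 0x00000001 to MAILBOX_INTERRUPT_0 acks the interrupt and asks
 * the chip to hold further irqs ("in-intr-handler" coalescing), while
 * writing 0x00000000 re-enables them (see the no-work path in
 * tg3_interrupt()).
 */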
3372 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3373 {
3374         struct net_device *dev = dev_id;
3375         struct tg3 *tp = netdev_priv(dev);
3376
3377         prefetch(tp->hw_status);
3378         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3379         /*
3380          * Writing any value to intr-mbox-0 clears PCI INTA# and
3381          * chip-internal interrupt pending events.
3382          * Writing non-zero to intr-mbox-0 additionally tells the
3383          * NIC to stop sending us irqs, engaging "in-intr-handler"
3384          * event coalescing.
3385          */
3386         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3387         if (likely(!tg3_irq_sync(tp)))
3388                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3389
3390         return IRQ_RETVAL(1);
3391 }
3392
3393 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3394 {
3395         struct net_device *dev = dev_id;
3396         struct tg3 *tp = netdev_priv(dev);
3397         struct tg3_hw_status *sblk = tp->hw_status;
3398         unsigned int handled = 1;
3399
3400         /* In INTx mode, it is possible for the interrupt to arrive at
3401          * the CPU before the status block posted prior to the interrupt
3402          * has reached host memory.  Reading the PCI State register will confirm
3403          * whether the interrupt is ours and will flush the status block.
3404          */
3405         if ((sblk->status & SD_STATUS_UPDATED) ||
3406             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3407                 /*
3408                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3409                  * chip-internal interrupt pending events.
3410                  * Writing non-zero to intr-mbox-0 additionally tells the
3411                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3412                  * event coalescing.
3413                  */
3414                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3415                              0x00000001);
3416                 if (tg3_irq_sync(tp))
3417                         goto out;
3418                 sblk->status &= ~SD_STATUS_UPDATED;
3419                 if (likely(tg3_has_work(tp))) {
3420                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3421                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3422                 } else {
3423                         /* No work, shared interrupt perhaps?  re-enable
3424                          * interrupts, and flush that PCI write
3425                          */
3426                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3427                                 0x00000000);
3428                 }
3429         } else {        /* shared interrupt */
3430                 handled = 0;
3431         }
3432 out:
3433         return IRQ_RETVAL(handled);
3434 }
3435
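/* Tagged-status variant: instead of testing SD_STATUS_UPDATED, the chip
 * bumps sblk->status_tag on every status block update.  Comparing it
 * with tp->last_tag tells us whether this (possibly shared) interrupt
 * is ours; tg3_poll() records the tag once the corresponding work has
 * been seen (the TG3_FLAG_TAGGED_STATUS path above).
 */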
3436 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3437 {
3438         struct net_device *dev = dev_id;
3439         struct tg3 *tp = netdev_priv(dev);
3440         struct tg3_hw_status *sblk = tp->hw_status;
3441         unsigned int handled = 1;
3442
3443         /* In INTx mode, it is possible for the interrupt to arrive at
3444          * the CPU before the status block posted prior to the interrupt
3445          * has reached host memory.  Reading the PCI State register will confirm
3446          * whether the interrupt is ours and will flush the status block.
3447          */
3448         if ((sblk->status_tag != tp->last_tag) ||
3449             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3450                 /*
3451                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3452                  * chip-internal interrupt pending events.
3453                  * Writing non-zero to intr-mbox-0 additionally tells the
3454                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3455                  * event coalescing.
3456                  */
3457                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3458                              0x00000001);
3459                 if (tg3_irq_sync(tp))
3460                         goto out;
3461                 if (netif_rx_schedule_prep(dev)) {
3462                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3463                         /* Update last_tag to mark that this status has been
3464                          * seen. Because interrupt may be shared, we may be
3465                          * racing with tg3_poll(), so only update last_tag
3466                          * if tg3_poll() is not scheduled.
3467                          */
3468                         tp->last_tag = sblk->status_tag;
3469                         __netif_rx_schedule(dev);
3470                 }
3471         } else {        /* shared interrupt */
3472                 handled = 0;
3473         }
3474 out:
3475         return IRQ_RETVAL(handled);
3476 }
3477
3478 /* ISR for interrupt test */
3479 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3480                 struct pt_regs *regs)
3481 {
3482         struct net_device *dev = dev_id;
3483         struct tg3 *tp = netdev_priv(dev);
3484         struct tg3_hw_status *sblk = tp->hw_status;
3485
3486         if ((sblk->status & SD_STATUS_UPDATED) ||
3487             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3488                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3489                              0x00000001);
3490                 return IRQ_RETVAL(1);
3491         }
3492         return IRQ_RETVAL(0);
3493 }
3494
3495 static int tg3_init_hw(struct tg3 *);
3496 static int tg3_halt(struct tg3 *, int, int);
3497
3498 #ifdef CONFIG_NET_POLL_CONTROLLER
3499 static void tg3_poll_controller(struct net_device *dev)
3500 {
3501         struct tg3 *tp = netdev_priv(dev);
3502
3503         tg3_interrupt(tp->pdev->irq, dev, NULL);
3504 }
3505 #endif
3506
3507 static void tg3_reset_task(void *_data)
3508 {
3509         struct tg3 *tp = _data;
3510         unsigned int restart_timer;
3511
3512         tg3_full_lock(tp, 0);
3513         tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3514
3515         if (!netif_running(tp->dev)) {
3516                 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3517                 tg3_full_unlock(tp);
3518                 return;
3519         }
3520
3521         tg3_full_unlock(tp);
3522
3523         tg3_netif_stop(tp);
3524
3525         tg3_full_lock(tp, 1);
3526
3527         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3528         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3529
3530         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3531         tg3_init_hw(tp);
3532
3533         tg3_netif_start(tp);
3534
3535         if (restart_timer)
3536                 mod_timer(&tp->timer, jiffies + 1);
3537
3538         tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3539
3540         tg3_full_unlock(tp);
3541 }
3542
3543 static void tg3_tx_timeout(struct net_device *dev)
3544 {
3545         struct tg3 *tp = netdev_priv(dev);
3546
3547         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3548                dev->name);
3549
3550         schedule_work(&tp->reset_task);
3551 }
3552
3553 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
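/* Worked example with illustrative numbers: for base = 0xffffff00 and
 * len = 0x200, base + len + 8 wraps to 0x108 in 32-bit arithmetic,
 * which is less than base, so the mapping straddles a 4GB boundary and
 * needs the workaround.  The base > 0xffffdcc0 pre-check only passes
 * when the buffer starts within 0x2340 bytes of a 4GB multiple,
 * presumably chosen to cover the largest single mapping this driver
 * ever makes.
 */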
3554 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3555 {
3556         u32 base = (u32) mapping & 0xffffffff;
3557
3558         return ((base > 0xffffdcc0) &&
3559                 (base + len + 8 < base));
3560 }
3561
3562 /* Test for DMA addresses > 40-bit */
3563 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3564                                           int len)
3565 {
3566 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3567         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3568                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3569         return 0;
3570 #else
3571         return 0;
3572 #endif
3573 }
3574
3575 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3576
3577 /* Workaround 4GB and 40-bit hardware DMA bugs. */
3578 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3579                                        u32 last_plus_one, u32 *start,
3580                                        u32 base_flags, u32 mss)
3581 {
3582         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3583         dma_addr_t new_addr = 0;
3584         u32 entry = *start;
3585         int i, ret = 0;
3586
3587         if (!new_skb) {
3588                 ret = -1;
3589         } else {
3590                 /* New SKB is guaranteed to be linear. */
3591                 entry = *start;
3592                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3593                                           PCI_DMA_TODEVICE);
3594                 /* Make sure new skb does not cross any 4G boundaries.
3595                  * Drop the packet if it does.
3596                  */
3597                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3598                         ret = -1;
3599                         dev_kfree_skb(new_skb);
3600                         new_skb = NULL;
3601                 } else {
3602                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3603                                     base_flags, 1 | (mss << 1));
3604                         *start = NEXT_TX(entry);
3605                 }
3606         }
3607
3608         /* Now clean up the sw ring entries. */
3609         i = 0;
3610         while (entry != last_plus_one) {
3611                 int len;
3612
3613                 if (i == 0)
3614                         len = skb_headlen(skb);
3615                 else
3616                         len = skb_shinfo(skb)->frags[i-1].size;
3617                 pci_unmap_single(tp->pdev,
3618                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3619                                  len, PCI_DMA_TODEVICE);
3620                 if (i == 0) {
3621                         tp->tx_buffers[entry].skb = new_skb;
3622                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3623                 } else {
3624                         tp->tx_buffers[entry].skb = NULL;
3625                 }
3626                 entry = NEXT_TX(entry);
3627                 i++;
3628         }
3629
3630         dev_kfree_skb(skb);
3631
3632         return ret;
3633 }
3634
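/* Descriptor packing used by tg3_set_txd() below: bit 0 of
 * mss_and_is_end marks the last descriptor of a frame (TXD_FLAG_END),
 * the remaining bits carry the TSO MSS, and when TXD_FLAG_VLAN is set
 * the VLAN tag rides in the upper 16 bits of the flags argument.
 * Callers in tg3_start_xmit() build the value as (is_end) | (mss << 1).
 */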
3635 static void tg3_set_txd(struct tg3 *tp, int entry,
3636                         dma_addr_t mapping, int len, u32 flags,
3637                         u32 mss_and_is_end)
3638 {
3639         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3640         int is_end = (mss_and_is_end & 0x1);
3641         u32 mss = (mss_and_is_end >> 1);
3642         u32 vlan_tag = 0;
3643
3644         if (is_end)
3645                 flags |= TXD_FLAG_END;
3646         if (flags & TXD_FLAG_VLAN) {
3647                 vlan_tag = flags >> 16;
3648                 flags &= 0xffff;
3649         }
3650         vlan_tag |= (mss << TXD_MSS_SHIFT);
3651
3652         txd->addr_hi = ((u64) mapping >> 32);
3653         txd->addr_lo = ((u64) mapping & 0xffffffff);
3654         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3655         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3656 }
3657
3658 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3659 {
3660         struct tg3 *tp = netdev_priv(dev);
3661         dma_addr_t mapping;
3662         u32 len, entry, base_flags, mss;
3663         int would_hit_hwbug;
3664
3665         len = skb_headlen(skb);
3666
3667         /* No BH disabling for tx_lock here.  We are running in BH disabled
3668          * context and TX reclaim runs via tp->poll inside of a software
3669          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3670          * no IRQ context deadlocks to worry about either.  Rejoice!
3671          */
3672         if (!spin_trylock(&tp->tx_lock))
3673                 return NETDEV_TX_LOCKED; 
3674
3675         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3676                 if (!netif_queue_stopped(dev)) {
3677                         netif_stop_queue(dev);
3678
3679                         /* This is a hard error, log it. */
3680                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3681                                "queue awake!\n", dev->name);
3682                 }
3683                 spin_unlock(&tp->tx_lock);
3684                 return NETDEV_TX_BUSY;
3685         }
3686
3687         entry = tp->tx_prod;
3688         base_flags = 0;
3689         if (skb->ip_summed == CHECKSUM_HW)
3690                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3691 #if TG3_TSO_SUPPORT != 0
3692         mss = 0;
3693         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3694             (mss = skb_shinfo(skb)->tso_size) != 0) {
3695                 int tcp_opt_len, ip_tcp_len;
3696
3697                 if (skb_header_cloned(skb) &&
3698                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3699                         dev_kfree_skb(skb);
3700                         goto out_unlock;
3701                 }
3702
3703                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3704                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3705
3706                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3707                                TXD_FLAG_CPU_POST_DMA);
3708
3709                 skb->nh.iph->check = 0;
3710                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3711                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3712                         skb->h.th->check = 0;
3713                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3714                 }
3715                 else {
3716                         skb->h.th->check =
3717                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3718                                                    skb->nh.iph->daddr,
3719                                                    0, IPPROTO_TCP, 0);
3720                 }
3721
3722                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3723                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3724                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3725                                 int tsflags;
3726
3727                                 tsflags = ((skb->nh.iph->ihl - 5) +
3728                                            (tcp_opt_len >> 2));
3729                                 mss |= (tsflags << 11);
3730                         }
3731                 } else {
3732                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3733                                 int tsflags;
3734
3735                                 tsflags = ((skb->nh.iph->ihl - 5) +
3736                                            (tcp_opt_len >> 2));
3737                                 base_flags |= tsflags << 12;
3738                         }
3739                 }
3740         }
3741 #else
3742         mss = 0;
3743 #endif
3744 #if TG3_VLAN_TAG_USED
3745         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3746                 base_flags |= (TXD_FLAG_VLAN |
3747                                (vlan_tx_tag_get(skb) << 16));
3748 #endif
3749
3750         /* Queue skb data, a.k.a. the main skb fragment. */
3751         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3752
3753         tp->tx_buffers[entry].skb = skb;
3754         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3755
3756         would_hit_hwbug = 0;
3757
3758         if (tg3_4g_overflow_test(mapping, len))
3759                 would_hit_hwbug = 1;
3760
3761         tg3_set_txd(tp, entry, mapping, len, base_flags,
3762                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3763
3764         entry = NEXT_TX(entry);
3765
3766         /* Now loop through additional data fragments, and queue them. */
3767         if (skb_shinfo(skb)->nr_frags > 0) {
3768                 unsigned int i, last;
3769
3770                 last = skb_shinfo(skb)->nr_frags - 1;
3771                 for (i = 0; i <= last; i++) {
3772                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3773
3774                         len = frag->size;
3775                         mapping = pci_map_page(tp->pdev,
3776                                                frag->page,
3777                                                frag->page_offset,
3778                                                len, PCI_DMA_TODEVICE);
3779
3780                         tp->tx_buffers[entry].skb = NULL;
3781                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3782
3783                         if (tg3_4g_overflow_test(mapping, len))
3784                                 would_hit_hwbug = 1;
3785
3786                         if (tg3_40bit_overflow_test(tp, mapping, len))
3787                                 would_hit_hwbug = 1;
3788
3789                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3790                                 tg3_set_txd(tp, entry, mapping, len,
3791                                             base_flags, (i == last)|(mss << 1));
3792                         else
3793                                 tg3_set_txd(tp, entry, mapping, len,
3794                                             base_flags, (i == last));
3795
3796                         entry = NEXT_TX(entry);
3797                 }
3798         }
3799
3800         if (would_hit_hwbug) {
3801                 u32 last_plus_one = entry;
3802                 u32 start;
3803
3804                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3805                 start &= (TG3_TX_RING_SIZE - 1);
3806
3807                 /* If the workaround fails due to memory/mapping
3808                  * failure, silently drop this packet.
3809                  */
3810                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
3811                                                 &start, base_flags, mss))
3812                         goto out_unlock;
3813
3814                 entry = start;
3815         }
3816
3817         /* Packets are ready, update Tx producer idx both locally and on the card. */
3818         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3819
3820         tp->tx_prod = entry;
3821         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3822                 netif_stop_queue(dev);
3823                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3824                         netif_wake_queue(tp->dev);
3825         }
3826
3827 out_unlock:
3828         mmiowb();
3829         spin_unlock(&tp->tx_lock);
3830
3831         dev->trans_start = jiffies;
3832
3833         return NETDEV_TX_OK;
3834 }
3835
3836 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3837                                int new_mtu)
3838 {
3839         dev->mtu = new_mtu;
3840
3841         if (new_mtu > ETH_DATA_LEN) {
3842                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
3843                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3844                         ethtool_op_set_tso(dev, 0);
3845                 }
3846                 else
3847                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3848         } else {
3849                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3850                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
3851                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
3852         }
3853 }
3854
3855 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3856 {
3857         struct tg3 *tp = netdev_priv(dev);
3858
3859         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3860                 return -EINVAL;
3861
3862         if (!netif_running(dev)) {
3863                 /* We'll just catch it later when the
3864                  * device is brought up.
3865                  */
3866                 tg3_set_mtu(dev, tp, new_mtu);
3867                 return 0;
3868         }
3869
3870         tg3_netif_stop(tp);
3871
3872         tg3_full_lock(tp, 1);
3873
3874         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3875
3876         tg3_set_mtu(dev, tp, new_mtu);
3877
3878         tg3_init_hw(tp);
3879
3880         tg3_netif_start(tp);
3881
3882         tg3_full_unlock(tp);
3883
3884         return 0;
3885 }
3886
3887 /* Free up pending packets in all rx/tx rings.
3888  *
3889  * The chip has been shut down and the driver detached from
3890  * the network stack, so no interrupts or new tx packets will
3891  * end up in the driver.  tp->{tx,}lock is not held and we are not
3892  * in an interrupt context and thus may sleep.
3893  */
3894 static void tg3_free_rings(struct tg3 *tp)
3895 {
3896         struct ring_info *rxp;
3897         int i;
3898
3899         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3900                 rxp = &tp->rx_std_buffers[i];
3901
3902                 if (rxp->skb == NULL)
3903                         continue;
3904                 pci_unmap_single(tp->pdev,
3905                                  pci_unmap_addr(rxp, mapping),
3906                                  tp->rx_pkt_buf_sz - tp->rx_offset,
3907                                  PCI_DMA_FROMDEVICE);
3908                 dev_kfree_skb_any(rxp->skb);
3909                 rxp->skb = NULL;
3910         }
3911
3912         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3913                 rxp = &tp->rx_jumbo_buffers[i];
3914
3915                 if (rxp->skb == NULL)
3916                         continue;
3917                 pci_unmap_single(tp->pdev,
3918                                  pci_unmap_addr(rxp, mapping),
3919                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3920                                  PCI_DMA_FROMDEVICE);
3921                 dev_kfree_skb_any(rxp->skb);
3922                 rxp->skb = NULL;
3923         }
3924
3925         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3926                 struct tx_ring_info *txp;
3927                 struct sk_buff *skb;
3928                 int j;
3929
3930                 txp = &tp->tx_buffers[i];
3931                 skb = txp->skb;
3932
3933                 if (skb == NULL) {
3934                         i++;
3935                         continue;
3936                 }
3937
3938                 pci_unmap_single(tp->pdev,
3939                                  pci_unmap_addr(txp, mapping),
3940                                  skb_headlen(skb),
3941                                  PCI_DMA_TODEVICE);
3942                 txp->skb = NULL;
3943
3944                 i++;
3945
3946                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3947                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3948                         pci_unmap_page(tp->pdev,
3949                                        pci_unmap_addr(txp, mapping),
3950                                        skb_shinfo(skb)->frags[j].size,
3951                                        PCI_DMA_TODEVICE);
3952                         i++;
3953                 }
3954
3955                 dev_kfree_skb_any(skb);
3956         }
3957 }
3958
3959 /* Initialize tx/rx rings for packet processing.
3960  *
3961  * The chip has been shut down and the driver detached from
3962  * the network stack, so no interrupts or new tx packets will
3963  * end up in the driver.  tp->{tx,}lock are held and thus
3964  * we may not sleep.
3965  */
3966 static void tg3_init_rings(struct tg3 *tp)
3967 {
3968         u32 i;
3969
3970         /* Free up all the SKBs. */
3971         tg3_free_rings(tp);
3972
3973         /* Zero out all descriptors. */
3974         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3975         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3976         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3977         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3978
3979         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
3980         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
3981             (tp->dev->mtu > ETH_DATA_LEN))
3982                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3983
3984         /* Initialize invariants of the rings; we only set this
3985          * stuff once.  This works because the card does not
3986          * write into the rx buffer posting rings.
3987          */
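        /* For example, slot i of the standard ring is tagged with
         * RXD_OPAQUE_RING_STD | (i << RXD_OPAQUE_INDEX_SHIFT), which is
         * exactly what tg3_rx() later unpacks (opaque_key and desc_idx)
         * to find the matching entry in rx_std_buffers[].  Only the DMA
         * address words are rewritten at runtime.
         */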
3988         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3989                 struct tg3_rx_buffer_desc *rxd;
3990
3991                 rxd = &tp->rx_std[i];
3992                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
3993                         << RXD_LEN_SHIFT;
3994                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3995                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3996                                (i << RXD_OPAQUE_INDEX_SHIFT));
3997         }
3998
3999         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4000                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4001                         struct tg3_rx_buffer_desc *rxd;
4002
4003                         rxd = &tp->rx_jumbo[i];
4004                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4005                                 << RXD_LEN_SHIFT;
4006                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4007                                 RXD_FLAG_JUMBO;
4008                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4009                                (i << RXD_OPAQUE_INDEX_SHIFT));
4010                 }
4011         }
4012
4013         /* Now allocate fresh SKBs for each rx ring. */
4014         for (i = 0; i < tp->rx_pending; i++) {
4015                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
4016                                      -1, i) < 0)
4017                         break;
4018         }
4019
4020         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4021                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4022                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4023                                              -1, i) < 0)
4024                                 break;
4025                 }
4026         }
4027 }
4028
4029 /*
4030  * Must not be invoked with interrupt sources disabled;
4031  * the hardware must already be shut down.
4032  */
4033 static void tg3_free_consistent(struct tg3 *tp)
4034 {
4035         kfree(tp->rx_std_buffers);
4036         tp->rx_std_buffers = NULL;
4037         if (tp->rx_std) {
4038                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4039                                     tp->rx_std, tp->rx_std_mapping);
4040                 tp->rx_std = NULL;
4041         }
4042         if (tp->rx_jumbo) {
4043                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4044                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4045                 tp->rx_jumbo = NULL;
4046         }
4047         if (tp->rx_rcb) {
4048                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4049                                     tp->rx_rcb, tp->rx_rcb_mapping);
4050                 tp->rx_rcb = NULL;
4051         }
4052         if (tp->tx_ring) {
4053                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4054                         tp->tx_ring, tp->tx_desc_mapping);
4055                 tp->tx_ring = NULL;
4056         }
4057         if (tp->hw_status) {
4058                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4059                                     tp->hw_status, tp->status_mapping);
4060                 tp->hw_status = NULL;
4061         }
4062         if (tp->hw_stats) {
4063                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4064                                     tp->hw_stats, tp->stats_mapping);
4065                 tp->hw_stats = NULL;
4066         }
4067 }
4068
4069 /*
4070  * Must not be invoked with interrupt sources disabled;
4071  * the hardware must already be shut down.  Can sleep.
4072  */
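/* Layout note for the allocation below: the std, jumbo and tx ring
 * bookkeeping arrays share a single kmalloc() block, carved up in that
 * order, so rx_jumbo_buffers and tx_buffers are simply offsets into
 * rx_std_buffers.  The descriptor rings and status/stats blocks come
 * from pci_alloc_consistent() because the chip DMAs them directly.
 */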
4073 static int tg3_alloc_consistent(struct tg3 *tp)
4074 {
4075         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4076                                       (TG3_RX_RING_SIZE +
4077                                        TG3_RX_JUMBO_RING_SIZE)) +
4078                                      (sizeof(struct tx_ring_info) *
4079                                       TG3_TX_RING_SIZE),
4080                                      GFP_KERNEL);
4081         if (!tp->rx_std_buffers)
4082                 return -ENOMEM;
4083
4084         memset(tp->rx_std_buffers, 0,
4085                (sizeof(struct ring_info) *
4086                 (TG3_RX_RING_SIZE +
4087                  TG3_RX_JUMBO_RING_SIZE)) +
4088                (sizeof(struct tx_ring_info) *
4089                 TG3_TX_RING_SIZE));
4090
4091         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4092         tp->tx_buffers = (struct tx_ring_info *)
4093                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4094
4095         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4096                                           &tp->rx_std_mapping);
4097         if (!tp->rx_std)
4098                 goto err_out;
4099
4100         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4101                                             &tp->rx_jumbo_mapping);
4102
4103         if (!tp->rx_jumbo)
4104                 goto err_out;
4105
4106         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4107                                           &tp->rx_rcb_mapping);
4108         if (!tp->rx_rcb)
4109                 goto err_out;
4110
4111         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4112                                            &tp->tx_desc_mapping);
4113         if (!tp->tx_ring)
4114                 goto err_out;
4115
4116         tp->hw_status = pci_alloc_consistent(tp->pdev,
4117                                              TG3_HW_STATUS_SIZE,
4118                                              &tp->status_mapping);
4119         if (!tp->hw_status)
4120                 goto err_out;
4121
4122         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4123                                             sizeof(struct tg3_hw_stats),
4124                                             &tp->stats_mapping);
4125         if (!tp->hw_stats)
4126                 goto err_out;
4127
4128         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4129         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4130
4131         return 0;
4132
4133 err_out:
4134         tg3_free_consistent(tp);
4135         return -ENOMEM;
4136 }
4137
4138 #define MAX_WAIT_CNT 1000
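     /* Each poll loop below gives up after MAX_WAIT_CNT iterations of
      * udelay(100), i.e. after roughly 100 ms.
      */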
4139
4140 /* To stop a block, clear the enable bit and poll till it
4141  * clears.  tp->lock is held.
4142  */
4143 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4144 {
4145         unsigned int i;
4146         u32 val;
4147
4148         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4149                 switch (ofs) {
4150                 case RCVLSC_MODE:
4151                 case DMAC_MODE:
4152                 case MBFREE_MODE:
4153                 case BUFMGR_MODE:
4154                 case MEMARB_MODE:
4155                         /* We can't enable/disable these bits of the
4156                          * 5705/5750, so just say success.
4157                          */
4158                         return 0;
4159
4160                 default:
4161                         break;
4162                 }
4163         }
4164
4165         val = tr32(ofs);
4166         val &= ~enable_bit;
4167         tw32_f(ofs, val);
4168
4169         for (i = 0; i < MAX_WAIT_CNT; i++) {
4170                 udelay(100);
4171                 val = tr32(ofs);
4172                 if ((val & enable_bit) == 0)
4173                         break;
4174         }
4175
4176         if (i == MAX_WAIT_CNT && !silent) {
4177                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4178                        "ofs=%lx enable_bit=%x\n",
4179                        ofs, enable_bit);
4180                 return -ENODEV;
4181         }
4182
4183         return 0;
4184 }
4185
4186 /* tp->lock is held. */
4187 static int tg3_abort_hw(struct tg3 *tp, int silent)
4188 {
4189         int i, err;
4190
4191         tg3_disable_ints(tp);
4192
4193         tp->rx_mode &= ~RX_MODE_ENABLE;
4194         tw32_f(MAC_RX_MODE, tp->rx_mode);
4195         udelay(10);
4196
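             /* Each tg3_stop_block() call below returns 0, or -ENODEV on a
              * timeout when not silent; OR-ing the results into err means a
              * single block that refuses to stop fails the whole call.
              */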
4197         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4198         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4199         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4200         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4201         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4202         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4203
4204         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4205         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4206         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4207         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4208         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4209         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4210         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4211
4212         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4213         tw32_f(MAC_MODE, tp->mac_mode);
4214         udelay(40);
4215
4216         tp->tx_mode &= ~TX_MODE_ENABLE;
4217         tw32_f(MAC_TX_MODE, tp->tx_mode);
4218
4219         for (i = 0; i < MAX_WAIT_CNT; i++) {
4220                 udelay(100);
4221                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4222                         break;
4223         }
4224         if (i >= MAX_WAIT_CNT) {
4225                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4226                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4227                        tp->dev->name, tr32(MAC_TX_MODE));
4228                 err |= -ENODEV;
4229         }
4230
4231         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4232         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4233         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4234
4235         tw32(FTQ_RESET, 0xffffffff);
4236         tw32(FTQ_RESET, 0x00000000);
4237
4238         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4239         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4240
4241         if (tp->hw_status)
4242                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4243         if (tp->hw_stats)
4244                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4245
4246         return err;
4247 }
4248
4249 /* tp->lock is held. */
4250 static int tg3_nvram_lock(struct tg3 *tp)
4251 {
4252         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4253                 int i;
4254
4255                 if (tp->nvram_lock_cnt == 0) {
4256                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4257                         for (i = 0; i < 8000; i++) {
4258                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4259                                         break;
4260                                 udelay(20);
4261                         }
4262                         if (i == 8000) {
4263                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4264                                 return -ENODEV;
4265                         }
4266                 }
4267                 tp->nvram_lock_cnt++;
4268         }
4269         return 0;
4270 }
4271
4272 /* tp->lock is held. */
4273 static void tg3_nvram_unlock(struct tg3 *tp)
4274 {
4275         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4276                 if (tp->nvram_lock_cnt > 0)
4277                         tp->nvram_lock_cnt--;
4278                 if (tp->nvram_lock_cnt == 0)
4279                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4280         }
4281 }
4282
4283 /* tp->lock is held. */
4284 static void tg3_enable_nvram_access(struct tg3 *tp)
4285 {
4286         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4287             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4288                 u32 nvaccess = tr32(NVRAM_ACCESS);
4289
4290                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4291         }
4292 }
4293
4294 /* tp->lock is held. */
4295 static void tg3_disable_nvram_access(struct tg3 *tp)
4296 {
4297         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4298             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4299                 u32 nvaccess = tr32(NVRAM_ACCESS);
4300
4301                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4302         }
4303 }
4304
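     /* Rough sketch of how the four helpers above are meant to be combined
      * by the NVRAM read/write paths elsewhere in this file (assumed
      * typical sequence, not a literal copy of any one caller):
      *
      *      tg3_nvram_lock(tp);
      *      tg3_enable_nvram_access(tp);
      *      ... program the NVRAM address/command registers and poll ...
      *      tg3_disable_nvram_access(tp);
      *      tg3_nvram_unlock(tp);
      */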
4305 /* tp->lock is held. */
4306 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4307 {
4308         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4309                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4310                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4311
4312         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4313                 switch (kind) {
4314                 case RESET_KIND_INIT:
4315                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4316                                       DRV_STATE_START);
4317                         break;
4318
4319                 case RESET_KIND_SHUTDOWN:
4320                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4321                                       DRV_STATE_UNLOAD);
4322                         break;
4323
4324                 case RESET_KIND_SUSPEND:
4325                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4326                                       DRV_STATE_SUSPEND);
4327                         break;
4328
4329                 default:
4330                         break;
4331                 }
4332         }
4333 }
4334
4335 /* tp->lock is held. */
4336 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4337 {
4338         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4339                 switch (kind) {
4340                 case RESET_KIND_INIT:
4341                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4342                                       DRV_STATE_START_DONE);
4343                         break;
4344
4345                 case RESET_KIND_SHUTDOWN:
4346                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4347                                       DRV_STATE_UNLOAD_DONE);
4348                         break;
4349
4350                 default:
4351                         break;
4352                 }
4353         }
4354 }
4355
4356 /* tp->lock is held. */
4357 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4358 {
4359         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4360                 switch (kind) {
4361                 case RESET_KIND_INIT:
4362                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4363                                       DRV_STATE_START);
4364                         break;
4365
4366                 case RESET_KIND_SHUTDOWN:
4367                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4368                                       DRV_STATE_UNLOAD);
4369                         break;
4370
4371                 case RESET_KIND_SUSPEND:
4372                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4373                                       DRV_STATE_SUSPEND);
4374                         break;
4375
4376                 default:
4377                         break;
4378                 }
4379         }
4380 }
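     /* The three signature writers above bracket a chip reset; tg3_halt()
      * below uses them in this order:
      *
      *      tg3_stop_fw(tp);
      *      tg3_write_sig_pre_reset(tp, kind);
      *      tg3_abort_hw(tp, silent);
      *      tg3_chip_reset(tp);
      *      tg3_write_sig_legacy(tp, kind);
      *      tg3_write_sig_post_reset(tp, kind);
      */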
4381
4382 static void tg3_stop_fw(struct tg3 *);
4383
4384 /* tp->lock is held. */
4385 static int tg3_chip_reset(struct tg3 *tp)
4386 {
4387         u32 val;
4388         void (*write_op)(struct tg3 *, u32, u32);
4389         int i;
4390
4391         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4392                 tg3_nvram_lock(tp);
4393                 /* No matching tg3_nvram_unlock() after this because
4394                  * chip reset below will undo the nvram lock.
4395                  */
4396                 tp->nvram_lock_cnt = 0;
4397         }
4398
4399         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4400             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4401                 tw32(GRC_FASTBOOT_PC, 0);
4402
4403         /*
4404          * We must avoid the readl() that normally takes place.
4405          * It locks machines, causes machine checks, and other
4406          * fun things.  So, temporarily disable the 5701
4407          * hardware workaround, while we do the reset.
4408          */
4409         write_op = tp->write32;
4410         if (write_op == tg3_write_flush_reg32)
4411                 tp->write32 = tg3_write32;
4412
4413         /* do the reset */
4414         val = GRC_MISC_CFG_CORECLK_RESET;
4415
4416         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4417                 if (tr32(0x7e2c) == 0x60) {
4418                         tw32(0x7e2c, 0x20);
4419                 }
4420                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4421                         tw32(GRC_MISC_CFG, (1 << 29));
4422                         val |= (1 << 29);
4423                 }
4424         }
4425
4426         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4427                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4428         tw32(GRC_MISC_CFG, val);
4429
4430         /* restore 5701 hardware bug workaround write method */
4431         tp->write32 = write_op;
4432
4433         /* Unfortunately, we have to delay before the PCI read back.
4434          * Some 575X chips will not even respond to a PCI cfg access
4435          * when the reset command is given to the chip.
4436          *
4437          * How do these hardware designers expect things to work
4438          * properly if the PCI write is posted for a long period
4439          * of time?  It is always necessary to have some method by
4440          * which a register read back can occur to push out the
4441          * write that performs the reset.
4442          *
4443          * For most tg3 variants the trick below has worked.
4444          * Ho hum...
4445          */
4446         udelay(120);
4447
4448         /* Flush PCI posted writes.  The normal MMIO registers
4449          * are inaccessible at this time, so this is the only
4450          * way to do this reliably (actually, this is no longer
4451          * the case, see above).  I tried to use indirect
4452          * register read/write but this upset some 5701 variants.
4453          */
4454         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4455
4456         udelay(120);
4457
4458         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4459                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4460                         int i;
4461                         u32 cfg_val;
4462
4463                         /* Wait for link training to complete.  */
4464                         for (i = 0; i < 5000; i++)
4465                                 udelay(100);
4466
4467                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4468                         pci_write_config_dword(tp->pdev, 0xc4,
4469                                                cfg_val | (1 << 15));
4470                 }
4471                 /* Set PCIE max payload size and clear error status.  */
4472                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4473         }
4474
4475         /* Re-enable indirect register accesses. */
4476         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4477                                tp->misc_host_ctrl);
4478
4479         /* Set MAX PCI retry to zero. */
4480         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4481         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4482             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4483                 val |= PCISTATE_RETRY_SAME_DMA;
4484         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4485
4486         pci_restore_state(tp->pdev);
4487
4488         /* Make sure PCI-X relaxed ordering bit is clear. */
4489         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4490         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4491         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4492
4493         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4494                 u32 val;
4495
4496                 /* Chip reset on 5780 will reset MSI enable bit,
4497                  * so we need to restore it.
4498                  */
4499                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4500                         u16 ctrl;
4501
4502                         pci_read_config_word(tp->pdev,
4503                                              tp->msi_cap + PCI_MSI_FLAGS,
4504                                              &ctrl);
4505                         pci_write_config_word(tp->pdev,
4506                                               tp->msi_cap + PCI_MSI_FLAGS,
4507                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4508                         val = tr32(MSGINT_MODE);
4509                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4510                 }
4511
4512                 val = tr32(MEMARB_MODE);
4513                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4514
4515         } else
4516                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4517
4518         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4519                 tg3_stop_fw(tp);
4520                 tw32(0x5000, 0x400);
4521         }
4522
4523         tw32(GRC_MODE, tp->grc_mode);
4524
4525         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4526                 u32 val = tr32(0xc4);
4527
4528                 tw32(0xc4, val | (1 << 15));
4529         }
4530
4531         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4532             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4533                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4534                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4535                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4536                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4537         }
4538
4539         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4540                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4541                 tw32_f(MAC_MODE, tp->mac_mode);
4542         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4543                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4544                 tw32_f(MAC_MODE, tp->mac_mode);
4545         } else
4546                 tw32_f(MAC_MODE, 0);
4547         udelay(40);
4548
4549         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4550                 /* Wait for firmware initialization to complete. */
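                     /* (Bounded at roughly one second: 100000 iterations of
                      * udelay(10), plus the memory-read overhead.)
                      */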
4551                 for (i = 0; i < 100000; i++) {
4552                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4553                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4554                                 break;
4555                         udelay(10);
4556                 }
4557                 if (i >= 100000) {
4558                         printk(KERN_ERR PFX "tg3_chip_reset timed out for %s, "
4559                                "firmware will not restart, magic=%08x\n",
4560                                tp->dev->name, val);
4561                         return -ENODEV;
4562                 }
4563         }
4564
4565         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4566             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4567                 u32 val = tr32(0x7c00);
4568
4569                 tw32(0x7c00, val | (1 << 25));
4570         }
4571
4572         /* Reprobe ASF enable state.  */
4573         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4574         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4575         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4576         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4577                 u32 nic_cfg;
4578
4579                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4580                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4581                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4582                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4583                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4584                 }
4585         }
4586
4587         return 0;
4588 }
4589
4590 /* tp->lock is held. */
4591 static void tg3_stop_fw(struct tg3 *tp)
4592 {
4593         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4594                 u32 val;
4595                 int i;
4596
4597                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4598                 val = tr32(GRC_RX_CPU_EVENT);
4599                 val |= (1 << 14);
4600                 tw32(GRC_RX_CPU_EVENT, val);
4601
4602                 /* Wait for RX cpu to ACK the event.  */
4603                 for (i = 0; i < 100; i++) {
4604                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4605                                 break;
4606                         udelay(1);
4607                 }
4608         }
4609 }
4610
4611 /* tp->lock is held. */
4612 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4613 {
4614         int err;
4615
4616         tg3_stop_fw(tp);
4617
4618         tg3_write_sig_pre_reset(tp, kind);
4619
4620         tg3_abort_hw(tp, silent);
4621         err = tg3_chip_reset(tp);
4622
4623         tg3_write_sig_legacy(tp, kind);
4624         tg3_write_sig_post_reset(tp, kind);
4625
4626         if (err)
4627                 return err;
4628
4629         return 0;
4630 }
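     /* Sketch of typical use, assuming the callers elsewhere in this file:
      * the close, suspend and error-recovery paths quiesce the chip with
      * tg3_halt(tp, RESET_KIND_SHUTDOWN, silent) before freeing rings or
      * reprogramming the hardware.
      */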
4631
4632 #define TG3_FW_RELEASE_MAJOR    0x0
4633 #define TG3_FW_RELEASE_MINOR    0x0
4634 #define TG3_FW_RELEASE_FIX      0x0
4635 #define TG3_FW_START_ADDR       0x08000000
4636 #define TG3_FW_TEXT_ADDR        0x08000000
4637 #define TG3_FW_TEXT_LEN         0x9c0
4638 #define TG3_FW_RODATA_ADDR      0x080009c0
4639 #define TG3_FW_RODATA_LEN       0x60
4640 #define TG3_FW_DATA_ADDR        0x08000a40
4641 #define TG3_FW_DATA_LEN         0x20
4642 #define TG3_FW_SBSS_ADDR        0x08000a60
4643 #define TG3_FW_SBSS_LEN         0xc
4644 #define TG3_FW_BSS_ADDR         0x08000a70
4645 #define TG3_FW_BSS_LEN          0x10
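     /* Layout of the firmware image below, as consumed by
      * tg3_load_5701_a0_firmware_fix().  The loader only uses the low
      * 16 bits of each address as an offset into the CPU scratch memory
      * (see tg3_load_firmware_cpu()).
      */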
4646
4647 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4648         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4649         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4650         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4651         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4652         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4653         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4654         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4655         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4656         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4657         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4658         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4659         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4660         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4661         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4662         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4663         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4664         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4665         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4666         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4667         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4668         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4669         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4670         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4671         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4672         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4673         0, 0, 0, 0, 0, 0,
4674         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4675         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4676         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4677         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4678         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4679         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4680         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4681         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4682         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4683         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4684         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4685         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4686         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4687         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4688         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4689         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4690         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4691         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4692         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4693         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4694         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4695         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4696         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4697         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4698         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4699         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4700         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4701         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4702         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4703         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4704         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4705         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4706         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4707         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4708         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4709         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4710         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4711         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4712         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4713         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4714         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4715         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4716         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4717         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4718         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4719         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4720         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4721         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4722         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4723         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4724         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4725         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4726         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4727         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4728         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4729         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4730         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4731         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4732         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4733         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4734         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4735         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4736         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4737         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4738         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4739 };
4740
4741 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4742         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4743         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4744         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4745         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4746         0x00000000
4747 };
4748
4749 #if 0 /* All zeros, don't eat up space with it. */
4750 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4751         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4752         0x00000000, 0x00000000, 0x00000000, 0x00000000
4753 };
4754 #endif
4755
4756 #define RX_CPU_SCRATCH_BASE     0x30000
4757 #define RX_CPU_SCRATCH_SIZE     0x04000
4758 #define TX_CPU_SCRATCH_BASE     0x34000
4759 #define TX_CPU_SCRATCH_SIZE     0x04000
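     /* Each on-chip CPU gets 16 kB (0x4000) of scratch memory; the
      * firmware sections loaded below are expected to fit in that window.
      */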
4760
4761 /* tp->lock is held. */
4762 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4763 {
4764         int i;
4765
4766         if (offset == TX_CPU_BASE &&
4767             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4768                 BUG();
4769
4770         if (offset == RX_CPU_BASE) {
4771                 for (i = 0; i < 10000; i++) {
4772                         tw32(offset + CPU_STATE, 0xffffffff);
4773                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4774                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4775                                 break;
4776                 }
4777
4778                 tw32(offset + CPU_STATE, 0xffffffff);
4779                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4780                 udelay(10);
4781         } else {
4782                 for (i = 0; i < 10000; i++) {
4783                         tw32(offset + CPU_STATE, 0xffffffff);
4784                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4785                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4786                                 break;
4787                 }
4788         }
4789
4790         if (i >= 10000) {
4791                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
4792                        "%s CPU\n",
4793                        tp->dev->name,
4794                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4795                 return -ENODEV;
4796         }
4797
4798         /* Clear firmware's nvram arbitration. */
4799         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4800                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
4801         return 0;
4802 }
4803
4804 struct fw_info {
4805         unsigned int text_base;
4806         unsigned int text_len;
4807         u32 *text_data;
4808         unsigned int rodata_base;
4809         unsigned int rodata_len;
4810         u32 *rodata_data;
4811         unsigned int data_base;
4812         unsigned int data_len;
4813         u32 *data_data;
4814 };
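     /* A NULL *_data pointer means "zero-fill that section"; see the write
      * loops in tg3_load_firmware_cpu() below and the data_data = NULL set
      * up by tg3_load_5701_a0_firmware_fix().
      */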
4815
4816 /* tp->lock is held. */
4817 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4818                                  int cpu_scratch_size, struct fw_info *info)
4819 {
4820         int err, lock_err, i;
4821         void (*write_op)(struct tg3 *, u32, u32);
4822
4823         if (cpu_base == TX_CPU_BASE &&
4824             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4825                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4826                        "TX cpu firmware on %s which is 5705.\n",
4827                        tp->dev->name);
4828                 return -EINVAL;
4829         }
4830
4831         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4832                 write_op = tg3_write_mem;
4833         else
4834                 write_op = tg3_write_indirect_reg32;
4835
4836         /* It is possible that bootcode is still loading at this point.
4837          * Get the nvram lock before halting the cpu.
4838          */
4839         lock_err = tg3_nvram_lock(tp);
4840         err = tg3_halt_cpu(tp, cpu_base);
4841         if (!lock_err)
4842                 tg3_nvram_unlock(tp);
4843         if (err)
4844                 goto out;
4845
4846         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4847                 write_op(tp, cpu_scratch_base + i, 0);
4848         tw32(cpu_base + CPU_STATE, 0xffffffff);
4849         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4850         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4851                 write_op(tp, (cpu_scratch_base +
4852                               (info->text_base & 0xffff) +
4853                               (i * sizeof(u32))),
4854                          (info->text_data ?
4855                           info->text_data[i] : 0));
4856         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4857                 write_op(tp, (cpu_scratch_base +
4858                               (info->rodata_base & 0xffff) +
4859                               (i * sizeof(u32))),
4860                          (info->rodata_data ?
4861                           info->rodata_data[i] : 0));
4862         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4863                 write_op(tp, (cpu_scratch_base +
4864                               (info->data_base & 0xffff) +
4865                               (i * sizeof(u32))),
4866                          (info->data_data ?
4867                           info->data_data[i] : 0));
4868
4869         err = 0;
4870
4871 out:
4872         return err;
4873 }
4874
4875 /* tp->lock is held. */
4876 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4877 {
4878         struct fw_info info;
4879         int err, i;
4880
4881         info.text_base = TG3_FW_TEXT_ADDR;
4882         info.text_len = TG3_FW_TEXT_LEN;
4883         info.text_data = &tg3FwText[0];
4884         info.rodata_base = TG3_FW_RODATA_ADDR;
4885         info.rodata_len = TG3_FW_RODATA_LEN;
4886         info.rodata_data = &tg3FwRodata[0];
4887         info.data_base = TG3_FW_DATA_ADDR;
4888         info.data_len = TG3_FW_DATA_LEN;
4889         info.data_data = NULL;
4890
4891         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4892                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4893                                     &info);
4894         if (err)
4895                 return err;
4896
4897         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4898                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4899                                     &info);
4900         if (err)
4901                 return err;
4902
4903         /* Now start up only the RX cpu. */
4904         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4905         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4906
4907         for (i = 0; i < 5; i++) {
4908                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4909                         break;
4910                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4911                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4912                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4913                 udelay(1000);
4914         }
4915         if (i >= 5) {
4916                 printk(KERN_ERR PFX "tg3_load_firmware failed to set "
4917                        "RX CPU PC for %s: is %08x, should be %08x\n",
4918                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4919                        TG3_FW_TEXT_ADDR);
4920                 return -ENODEV;
4921         }
4922         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4923         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4924
4925         return 0;
4926 }
4927
4928 #if TG3_TSO_SUPPORT != 0
4929
4930 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4931 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4932 #define TG3_TSO_FW_RELEASE_FIX          0x0
4933 #define TG3_TSO_FW_START_ADDR           0x08000000
4934 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4935 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4936 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4937 #define TG3_TSO_FW_RODATA_LEN           0x60
4938 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4939 #define TG3_TSO_FW_DATA_LEN             0x30
4940 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4941 #define TG3_TSO_FW_SBSS_LEN             0x2c
4942 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4943 #define TG3_TSO_FW_BSS_LEN              0x894
4944
4945 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4946         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4947         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4948         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4949         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4950         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4951         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4952         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4953         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4954         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4955         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4956         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4957         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4958         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4959         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4960         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4961         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4962         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4963         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4964         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4965         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4966         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4967         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4968         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4969         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4970         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4971         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4972         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4973         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4974         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4975         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4976         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4977         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4978         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4979         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4980         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4981         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4982         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4983         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4984         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4985         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4986         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4987         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4988         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4989         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4990         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4991         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4992         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4993         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4994         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4995         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4996         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4997         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4998         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4999         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5000         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5001         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5002         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5003         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5004         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5005         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5006         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5007         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5008         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5009         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5010         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5011         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5012         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5013         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5014         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5015         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5016         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5017         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5018         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5019         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5020         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5021         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5022         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5023         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5024         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5025         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5026         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5027         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5028         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5029         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5030         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5031         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5032         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5033         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5034         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5035         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5036         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5037         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5038         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5039         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5040         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5041         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5042         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5043         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5044         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5045         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5046         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5047         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5048         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5049         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5050         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5051         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5052         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5053         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5054         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5055         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5056         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5057         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5058         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5059         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5060         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5061         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5062         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5063         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5064         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5065         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5066         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5067         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5068         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5069         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5070         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5071         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5072         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5073         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5074         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5075         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5076         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5077         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5078         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5079         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5080         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5081         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5082         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5083         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5084         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5085         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5086         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5087         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5088         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5089         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5090         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5091         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5092         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5093         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5094         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5095         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5096         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5097         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5098         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5099         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5100         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5101         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5102         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5103         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5104         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5105         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5106         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5107         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5108         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5109         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5110         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5111         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5112         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5113         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5114         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5115         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5116         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5117         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5118         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5119         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5120         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5121         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5122         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5123         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5124         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5125         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5126         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5127         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5128         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5129         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5130         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5131         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5132         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5133         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5134         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5135         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5136         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5137         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5138         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5139         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5140         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5141         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5142         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5143         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5144         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5145         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5146         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5147         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5148         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5149         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5150         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5151         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5152         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5153         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5154         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5155         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5156         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5157         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5158         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5159         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5160         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5161         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5162         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5163         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5164         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5165         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5166         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5167         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5168         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5169         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5170         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5171         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5172         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5173         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5174         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5175         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5176         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5177         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5178         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5179         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5180         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5181         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5182         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5183         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5184         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5185         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5186         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5187         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5188         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5189         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5190         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5191         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5192         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5193         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5194         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5195         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5196         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5197         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5198         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5199         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5200         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5201         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5202         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5203         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5204         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5205         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5206         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5207         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5208         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5209         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5210         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5211         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5212         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5213         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5214         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5215         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5216         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5217         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5218         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5219         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5220         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5221         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5222         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5223         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5224         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5225         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5226         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5227         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5228         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5229         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5230 };
5231
5232 static u32 tg3TsoFwRodata[] = {
5233         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5234         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5235         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5236         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5237         0x00000000,
5238 };
5239
5240 static u32 tg3TsoFwData[] = {
5241         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5242         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5243         0x00000000,
5244 };
5245
5246 /* 5705 needs a special version of the TSO firmware.  */
5247 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5248 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
5249 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5250 #define TG3_TSO5_FW_START_ADDR          0x00010000
5251 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5252 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5253 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5254 #define TG3_TSO5_FW_RODATA_LEN          0x50
5255 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5256 #define TG3_TSO5_FW_DATA_LEN            0x20
5257 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5258 #define TG3_TSO5_FW_SBSS_LEN            0x28
5259 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5260 #define TG3_TSO5_FW_BSS_LEN             0x88
5261
5262 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5263         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5264         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5265         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5266         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5267         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5268         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5269         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5270         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5271         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5272         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5273         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5274         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5275         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5276         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5277         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5278         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5279         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5280         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5281         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5282         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5283         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5284         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5285         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5286         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5287         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5288         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5289         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5290         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5291         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5292         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5293         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5294         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5295         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5296         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5297         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5298         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5299         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5300         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5301         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5302         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5303         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5304         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5305         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5306         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5307         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5308         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5309         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5310         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5311         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5312         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5313         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5314         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5315         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5316         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5317         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5318         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5319         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5320         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5321         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5322         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5323         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5324         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5325         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5326         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5327         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5328         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5329         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5330         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5331         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5332         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5333         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5334         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5335         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5336         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5337         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5338         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5339         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5340         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5341         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5342         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5343         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5344         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5345         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5346         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5347         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5348         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5349         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5350         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5351         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5352         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5353         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5354         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5355         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5356         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5357         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5358         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5359         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5360         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5361         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5362         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5363         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5364         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5365         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5366         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5367         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5368         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5369         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5370         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5371         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5372         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5373         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5374         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5375         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5376         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5377         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5378         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5379         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5380         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5381         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5382         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5383         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5384         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5385         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5386         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5387         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5388         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5389         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5390         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5391         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5392         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5393         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5394         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5395         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5396         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5397         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5398         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5399         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5400         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5401         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5402         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5403         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5404         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5405         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5406         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5407         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5408         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5409         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5410         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5411         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5412         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5413         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5414         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5415         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5416         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5417         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5418         0x00000000, 0x00000000, 0x00000000,
5419 };
5420
5421 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5422         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5423         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5424         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5425         0x00000000, 0x00000000, 0x00000000,
5426 };
5427
5428 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5429         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5430         0x00000000, 0x00000000, 0x00000000,
5431 };
5432
5433 /* tp->lock is held. */
5434 static int tg3_load_tso_firmware(struct tg3 *tp)
5435 {
5436         struct fw_info info;
5437         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5438         int err, i;
5439
5440         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5441                 return 0;
5442
5443         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5444                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5445                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5446                 info.text_data = &tg3Tso5FwText[0];
5447                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5448                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5449                 info.rodata_data = &tg3Tso5FwRodata[0];
5450                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5451                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5452                 info.data_data = &tg3Tso5FwData[0];
5453                 cpu_base = RX_CPU_BASE;
5454                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5455                 cpu_scratch_size = (info.text_len +
5456                                     info.rodata_len +
5457                                     info.data_len +
5458                                     TG3_TSO5_FW_SBSS_LEN +
5459                                     TG3_TSO5_FW_BSS_LEN);
5460         } else {
5461                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5462                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5463                 info.text_data = &tg3TsoFwText[0];
5464                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5465                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5466                 info.rodata_data = &tg3TsoFwRodata[0];
5467                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5468                 info.data_len = TG3_TSO_FW_DATA_LEN;
5469                 info.data_data = &tg3TsoFwData[0];
5470                 cpu_base = TX_CPU_BASE;
5471                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5472                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5473         }
5474
5475         err = tg3_load_firmware_cpu(tp, cpu_base,
5476                                     cpu_scratch_base, cpu_scratch_size,
5477                                     &info);
5478         if (err)
5479                 return err;
5480
5481         /* Now startup the cpu. */
5482         tw32(cpu_base + CPU_STATE, 0xffffffff);
5483         tw32_f(cpu_base + CPU_PC,    info.text_base);
5484
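             /* Verify that the CPU latched the new start address, retrying
              * the halt + PC-write sequence a few times if it did not.
              */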
5485         for (i = 0; i < 5; i++) {
5486                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5487                         break;
5488                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5489                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5490                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5491                 udelay(1000);
5492         }
5493         if (i >= 5) {
5494                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails to set "
5495                        "CPU PC for %s, is %08x, should be %08x\n",
5496                        tp->dev->name, tr32(cpu_base + CPU_PC),
5497                        info.text_base);
5498                 return -ENODEV;
5499         }
5500         tw32(cpu_base + CPU_STATE, 0xffffffff);
5501         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5502         return 0;
5503 }
5504
5505 #endif /* TG3_TSO_SUPPORT != 0 */
5506
5507 /* tp->lock is held. */
5508 static void __tg3_set_mac_addr(struct tg3 *tp)
5509 {
5510         u32 addr_high, addr_low;
5511         int i;
5512
5513         addr_high = ((tp->dev->dev_addr[0] << 8) |
5514                      tp->dev->dev_addr[1]);
5515         addr_low = ((tp->dev->dev_addr[2] << 24) |
5516                     (tp->dev->dev_addr[3] << 16) |
5517                     (tp->dev->dev_addr[4] <<  8) |
5518                     (tp->dev->dev_addr[5] <<  0));
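             /* Program all four MAC address slots with the same station address. */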
5519         for (i = 0; i < 4; i++) {
5520                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5521                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5522         }
5523
5524         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5525             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5526                 for (i = 0; i < 12; i++) {
5527                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5528                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5529                 }
5530         }
5531
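             /* Seed the transmit backoff algorithm from the station address. */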
5532         addr_high = (tp->dev->dev_addr[0] +
5533                      tp->dev->dev_addr[1] +
5534                      tp->dev->dev_addr[2] +
5535                      tp->dev->dev_addr[3] +
5536                      tp->dev->dev_addr[4] +
5537                      tp->dev->dev_addr[5]) &
5538                 TX_BACKOFF_SEED_MASK;
5539         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5540 }
5541
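     /* Change the interface MAC address: validate and copy the new station
      * address, then reprogram the MAC address registers (under tp->lock)
      * if the interface is running.
      */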
5542 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5543 {
5544         struct tg3 *tp = netdev_priv(dev);
5545         struct sockaddr *addr = p;
5546
5547         if (!is_valid_ether_addr(addr->sa_data))
5548                 return -EINVAL;
5549
5550         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5551
5552         if (!netif_running(dev))
5553                 return 0;
5554
5555         spin_lock_bh(&tp->lock);
5556         __tg3_set_mac_addr(tp);
5557         spin_unlock_bh(&tp->lock);
5558
5559         return 0;
5560 }
5561
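     /* Write one TG3_BDINFO block into NIC memory: the ring's 64-bit host
      * DMA address (split into high and low words), its maxlen/flags word
      * and, when TG3_FLG2_5705_PLUS is not set, the ring's location in
      * NIC SRAM.
      */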
5562 /* tp->lock is held. */
5563 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5564                            dma_addr_t mapping, u32 maxlen_flags,
5565                            u32 nic_addr)
5566 {
5567         tg3_write_mem(tp,
5568                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5569                       ((u64) mapping >> 32));
5570         tg3_write_mem(tp,
5571                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5572                       ((u64) mapping & 0xffffffff));
5573         tg3_write_mem(tp,
5574                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5575                        maxlen_flags);
5576
5577         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5578                 tg3_write_mem(tp,
5579                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5580                               nic_addr);
5581 }
5582
5583 static void __tg3_set_rx_mode(struct net_device *);
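     /* Program the host coalescing engine from an ethtool_coalesce request.
      * The during-IRQ tick values and the statistics block interval are
      * only programmed when TG3_FLG2_5705_PLUS is not set.
      */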
5584 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5585 {
5586         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5587         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5588         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5589         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5590         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5591                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5592                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5593         }
5594         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5595         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5596         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5597                 u32 val = ec->stats_block_coalesce_usecs;
5598
5599                 if (!netif_carrier_ok(tp->dev))
5600                         val = 0;
5601
5602                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5603         }
5604 }
5605
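     /* Bring the chip from reset to a fully initialized state: stop any
      * running firmware, reset the core, then reprogram the rings, buffer
      * manager, DMA engines, MAC and host coalescing engine before finally
      * setting up the PHY/link.
      */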
5606 /* tp->lock is held. */
5607 static int tg3_reset_hw(struct tg3 *tp)
5608 {
5609         u32 val, rdmac_mode;
5610         int i, err, limit;
5611
5612         tg3_disable_ints(tp);
5613
5614         tg3_stop_fw(tp);
5615
5616         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5617
5618         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5619                 tg3_abort_hw(tp, 1);
5620         }
5621
5622         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
5623                 tg3_phy_reset(tp);
5624
5625         err = tg3_chip_reset(tp);
5626         if (err)
5627                 return err;
5628
5629         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5630
5631         /* This works around an issue with Athlon chipsets on
5632          * B3 tigon3 silicon.  This bit has no effect on any
5633          * other revision.  But do not set this on PCI Express
5634          * chips.
5635          */
5636         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5637                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5638         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5639
5640         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5641             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5642                 val = tr32(TG3PCI_PCISTATE);
5643                 val |= PCISTATE_RETRY_SAME_DMA;
5644                 tw32(TG3PCI_PCISTATE, val);
5645         }
5646
5647         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5648                 /* Enable some hw fixes.  */
5649                 val = tr32(TG3PCI_MSI_DATA);
5650                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5651                 tw32(TG3PCI_MSI_DATA, val);
5652         }
5653
5654         /* Descriptor ring init may access the NIC SRAM
5655          * area to set up the TX descriptors, so we can
5656          * only do this after the hardware has been
5657          * successfully reset.
5658          */
5659         tg3_init_rings(tp);
5660
5661         /* This value is determined during the probe time DMA
5662          * engine test, tg3_test_dma.
5663          */
5664         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5665
5666         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5667                           GRC_MODE_4X_NIC_SEND_RINGS |
5668                           GRC_MODE_NO_TX_PHDR_CSUM |
5669                           GRC_MODE_NO_RX_PHDR_CSUM);
5670         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5671         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5672                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5673         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5674                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5675
5676         tw32(GRC_MODE,
5677              tp->grc_mode |
5678              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5679
5680         /* Setup the timer prescaler register.  The clock is always 66 MHz. */
5681         val = tr32(GRC_MISC_CFG);
5682         val &= ~0xff;
5683         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5684         tw32(GRC_MISC_CFG, val);
5685
5686         /* Initialize MBUF/DESC pool. */
5687         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5688                 /* Do nothing.  */
5689         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5690                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5691                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5692                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5693                 else
5694                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5695                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5696                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5697         }
5698 #if TG3_TSO_SUPPORT != 0
5699         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5700                 int fw_len;
5701
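                     /* The 5705 TSO firmware image lives at the start of the
                      * MBUF pool; carve its length (rounded up to a 128-byte
                      * boundary) out of the pool base and size.
                      */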
5702                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5703                           TG3_TSO5_FW_RODATA_LEN +
5704                           TG3_TSO5_FW_DATA_LEN +
5705                           TG3_TSO5_FW_SBSS_LEN +
5706                           TG3_TSO5_FW_BSS_LEN);
5707                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5708                 tw32(BUFMGR_MB_POOL_ADDR,
5709                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5710                 tw32(BUFMGR_MB_POOL_SIZE,
5711                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5712         }
5713 #endif
5714
5715         if (tp->dev->mtu <= ETH_DATA_LEN) {
5716                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5717                      tp->bufmgr_config.mbuf_read_dma_low_water);
5718                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5719                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5720                 tw32(BUFMGR_MB_HIGH_WATER,
5721                      tp->bufmgr_config.mbuf_high_water);
5722         } else {
5723                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5724                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5725                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5726                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5727                 tw32(BUFMGR_MB_HIGH_WATER,
5728                      tp->bufmgr_config.mbuf_high_water_jumbo);
5729         }
5730         tw32(BUFMGR_DMA_LOW_WATER,
5731              tp->bufmgr_config.dma_low_water);
5732         tw32(BUFMGR_DMA_HIGH_WATER,
5733              tp->bufmgr_config.dma_high_water);
5734
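             /* Enable the buffer manager and poll until it reports the
              * enable bit set.
              */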
5735         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5736         for (i = 0; i < 2000; i++) {
5737                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5738                         break;
5739                 udelay(10);
5740         }
5741         if (i >= 2000) {
5742                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5743                        tp->dev->name);
5744                 return -ENODEV;
5745         }
5746
5747         /* Setup replenish threshold. */
5748         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5749
5750         /* Initialize TG3_BDINFO's at:
5751          *  RCVDBDI_STD_BD:     standard eth size rx ring
5752          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5753          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5754          *
5755          * like so:
5756          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5757          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5758          *                              ring attribute flags
5759          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5760          *
5761          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5762          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5763          *
5764          * The size of each ring is fixed in the firmware, but the location is
5765          * configurable.
5766          */
5767         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5768              ((u64) tp->rx_std_mapping >> 32));
5769         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5770              ((u64) tp->rx_std_mapping & 0xffffffff));
5771         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5772              NIC_SRAM_RX_BUFFER_DESC);
5773
5774         /* Don't even try to program the JUMBO/MINI buffer descriptor
5775          * configs on 5705.
5776          */
5777         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5778                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5779                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5780         } else {
5781                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5782                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5783
5784                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5785                      BDINFO_FLAGS_DISABLED);
5786
5787                 /* Setup replenish threshold. */
5788                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5789
5790                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5791                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5792                              ((u64) tp->rx_jumbo_mapping >> 32));
5793                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5794                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5795                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5796                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5797                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5798                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5799                 } else {
5800                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5801                              BDINFO_FLAGS_DISABLED);
5802                 }
5803
5804         }
5805
5806         /* There is only one send ring on 5705/5750, no need to explicitly
5807          * disable the others.
5808          */
5809         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5810                 /* Clear out send RCB ring in SRAM. */
5811                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5812                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5813                                       BDINFO_FLAGS_DISABLED);
5814         }
5815
5816         tp->tx_prod = 0;
5817         tp->tx_cons = 0;
5818         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5819         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5820
5821         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5822                        tp->tx_desc_mapping,
5823                        (TG3_TX_RING_SIZE <<
5824                         BDINFO_FLAGS_MAXLEN_SHIFT),
5825                        NIC_SRAM_TX_BUFFER_DESC);
5826
5827         /* There is only one receive return ring on 5705/5750, no need
5828          * to explicitly disable the others.
5829          */
5830         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5831                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5832                      i += TG3_BDINFO_SIZE) {
5833                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5834                                       BDINFO_FLAGS_DISABLED);
5835                 }
5836         }
5837
5838         tp->rx_rcb_ptr = 0;
5839         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5840
5841         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5842                        tp->rx_rcb_mapping,
5843                        (TG3_RX_RCB_RING_SIZE(tp) <<
5844                         BDINFO_FLAGS_MAXLEN_SHIFT),
5845                        0);
5846
5847         tp->rx_std_ptr = tp->rx_pending;
5848         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5849                      tp->rx_std_ptr);
5850
5851         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
5852                                                 tp->rx_jumbo_pending : 0;
5853         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5854                      tp->rx_jumbo_ptr);
5855
5856         /* Initialize MAC address and backoff seed. */
5857         __tg3_set_mac_addr(tp);
5858
5859         /* MTU + ethernet header + FCS + optional VLAN tag */
5860         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5861
5862         /* The slot time is changed by tg3_setup_phy if we
5863          * run at gigabit with half duplex.
5864          */
5865         tw32(MAC_TX_LENGTHS,
5866              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5867              (6 << TX_LENGTHS_IPG_SHIFT) |
5868              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5869
5870         /* Receive rules. */
5871         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5872         tw32(RCVLPC_CONFIG, 0x0181);
5873
5874         /* Calculate the RDMAC_MODE setting early; we need it to
5875          * determine the RCVLPC_STATS_ENABLE mask.
5876          */
5877         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5878                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5879                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5880                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5881                       RDMAC_MODE_LNGREAD_ENAB);
5882         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5883                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5884
5885         /* If statement applies to 5705 and 5750 PCI devices only */
5886         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5887              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5888             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5889                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5890                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5891                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5892                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5893                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5894                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5895                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5896                 }
5897         }
5898
5899         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5900                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5901
5902 #if TG3_TSO_SUPPORT != 0
5903         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5904                 rdmac_mode |= (1 << 27);
5905 #endif
5906
5907         /* Receive/send statistics. */
5908         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5909             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5910                 val = tr32(RCVLPC_STATS_ENABLE);
5911                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5912                 tw32(RCVLPC_STATS_ENABLE, val);
5913         } else {
5914                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5915         }
5916         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5917         tw32(SNDDATAI_STATSENAB, 0xffffff);
5918         tw32(SNDDATAI_STATSCTRL,
5919              (SNDDATAI_SCTRL_ENABLE |
5920               SNDDATAI_SCTRL_FASTUPD));
5921
5922         /* Setup host coalescing engine. */
5923         tw32(HOSTCC_MODE, 0);
5924         for (i = 0; i < 2000; i++) {
5925                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5926                         break;
5927                 udelay(10);
5928         }
5929
5930         __tg3_set_coalesce(tp, &tp->coal);
5931
5932         /* set status block DMA address */
5933         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5934              ((u64) tp->status_mapping >> 32));
5935         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5936              ((u64) tp->status_mapping & 0xffffffff));
5937
5938         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5939                 /* Status/statistics block address.  See tg3_timer,
5940                  * the tg3_periodic_fetch_stats call there, and
5941                  * tg3_get_stats to see how this works for 5705/5750 chips.
5942                  */
5943                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5944                      ((u64) tp->stats_mapping >> 32));
5945                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5946                      ((u64) tp->stats_mapping & 0xffffffff));
5947                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5948                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5949         }
5950
5951         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5952
5953         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5954         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5955         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5956                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5957
5958         /* Clear statistics/status block in chip, and status block in ram. */
5959         for (i = NIC_SRAM_STATS_BLK;
5960              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5961              i += sizeof(u32)) {
5962                 tg3_write_mem(tp, i, 0);
5963                 udelay(40);
5964         }
5965         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5966
5967         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5968                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
5969                 /* reset to prevent losing 1st rx packet intermittently */
5970                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5971                 udelay(10);
5972         }
5973
5974         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5975                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5976         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5977         udelay(40);
5978
5979         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5980          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5981          * register to preserve the GPIO settings for LOMs. The GPIOs,
5982          * whether used as inputs or outputs, are set by boot code after
5983          * reset.
5984          */
5985         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5986                 u32 gpio_mask;
5987
5988                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5989                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5990
5991                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5992                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5993                                      GRC_LCLCTRL_GPIO_OUTPUT3;
5994
5995                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5996
5997                 /* GPIO1 must be driven high for eeprom write protect */
5998                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5999                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6000         }
6001         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6002         udelay(100);
6003
6004         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6005         tp->last_tag = 0;
6006
6007         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6008                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6009                 udelay(40);
6010         }
6011
6012         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6013                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6014                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6015                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6016                WDMAC_MODE_LNGREAD_ENAB);
6017
6018         /* If statement applies to 5705 and 5750 PCI devices only */
6019         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6020              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6021             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6022                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6023                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6024                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6025                         /* nothing */
6026                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6027                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6028                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6029                         val |= WDMAC_MODE_RX_ACCEL;
6030                 }
6031         }
6032
6033         /* Enable host coalescing bug fix */
6034         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
6035                 val |= (1 << 29);
6036
6037         tw32_f(WDMAC_MODE, val);
6038         udelay(40);
6039
6040         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6041                 val = tr32(TG3PCI_X_CAPS);
6042                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6043                         val &= ~PCIX_CAPS_BURST_MASK;
6044                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6045                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6046                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6047                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6048                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6049                                 val |= (tp->split_mode_max_reqs <<
6050                                         PCIX_CAPS_SPLIT_SHIFT);
6051                 }
6052                 tw32(TG3PCI_X_CAPS, val);
6053         }
6054
6055         tw32_f(RDMAC_MODE, rdmac_mode);
6056         udelay(40);
6057
6058         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6059         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6060                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6061         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6062         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6063         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6064         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6065         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6066 #if TG3_TSO_SUPPORT != 0
6067         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6068                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6069 #endif
6070         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6071         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6072
6073         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6074                 err = tg3_load_5701_a0_firmware_fix(tp);
6075                 if (err)
6076                         return err;
6077         }
6078
6079 #if TG3_TSO_SUPPORT != 0
6080         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6081                 err = tg3_load_tso_firmware(tp);
6082                 if (err)
6083                         return err;
6084         }
6085 #endif
6086
6087         tp->tx_mode = TX_MODE_ENABLE;
6088         tw32_f(MAC_TX_MODE, tp->tx_mode);
6089         udelay(100);
6090
6091         tp->rx_mode = RX_MODE_ENABLE;
6092         tw32_f(MAC_RX_MODE, tp->rx_mode);
6093         udelay(10);
6094
6095         if (tp->link_config.phy_is_low_power) {
6096                 tp->link_config.phy_is_low_power = 0;
6097                 tp->link_config.speed = tp->link_config.orig_speed;
6098                 tp->link_config.duplex = tp->link_config.orig_duplex;
6099                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6100         }
6101
6102         tp->mi_mode = MAC_MI_MODE_BASE;
6103         tw32_f(MAC_MI_MODE, tp->mi_mode);
6104         udelay(80);
6105
6106         tw32(MAC_LED_CTRL, tp->led_ctrl);
6107
6108         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6109         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6110                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6111                 udelay(10);
6112         }
6113         tw32_f(MAC_RX_MODE, tp->rx_mode);
6114         udelay(10);
6115
6116         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6117                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6118                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6119                         /* Set drive transmission level to 1.2V only
6120                          * if the signal pre-emphasis bit is not set.  */
6121                         val = tr32(MAC_SERDES_CFG);
6122                         val &= 0xfffff000;
6123                         val |= 0x880;
6124                         tw32(MAC_SERDES_CFG, val);
6125                 }
6126                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6127                         tw32(MAC_SERDES_CFG, 0x616000);
6128         }
6129
6130         /* Prevent chip from dropping frames when flow control
6131          * is enabled.
6132          */
6133         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6134
6135         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6136             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6137                 /* Use hardware link auto-negotiation */
6138                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6139         }
6140
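             /* For MII SERDES links on the 5714, enable the SERDES's own RX
              * signal detect and switch GRC local control from the external
              * signal-detect pin to the internal one.
              */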
6141         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6142             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6143                 u32 tmp;
6144
6145                 tmp = tr32(SERDES_RX_CTRL);
6146                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6147                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6148                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6149                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6150         }
6151
6152         err = tg3_setup_phy(tp, 1);
6153         if (err)
6154                 return err;
6155
6156         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6157                 u32 tmp;
6158
6159                 /* Clear CRC stats. */
6160                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6161                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6162                         tg3_readphy(tp, 0x14, &tmp);
6163                 }
6164         }
6165
6166         __tg3_set_rx_mode(tp->dev);
6167
6168         /* Initialize receive rules. */
6169         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6170         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6171         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6172         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6173
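             /* The number of usable receive-rule slots depends on the chip
              * class, and the top four slots are left untouched when ASF
              * firmware is enabled (presumably because the firmware uses
              * them); the fall-through switch below zeroes every remaining
              * unused rule/value pair.
              */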
6174         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6175             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6176                 limit = 8;
6177         else
6178                 limit = 16;
6179         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6180                 limit -= 4;
6181         switch (limit) {
6182         case 16:
6183                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6184         case 15:
6185                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6186         case 14:
6187                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6188         case 13:
6189                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6190         case 12:
6191                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6192         case 11:
6193                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6194         case 10:
6195                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6196         case 9:
6197                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6198         case 8:
6199                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6200         case 7:
6201                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6202         case 6:
6203                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6204         case 5:
6205                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6206         case 4:
6207                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6208         case 3:
6209                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6210         case 2:
6211         case 1:
6212
6213         default:
6214                 break;
6215         }
6216
6217         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6218
6219         return 0;
6220 }
6221
6222 /* Called at device open time to get the chip ready for
6223  * packet processing.  Invoked with tp->lock held.
6224  */
6225 static int tg3_init_hw(struct tg3 *tp)
6226 {
6227         int err;
6228
6229         /* Force the chip into D0. */
6230         err = tg3_set_power_state(tp, PCI_D0);
6231         if (err)
6232                 goto out;
6233
6234         tg3_switch_clocks(tp);
6235
6236         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6237
6238         err = tg3_reset_hw(tp);
6239
6240 out:
6241         return err;
6242 }
6243
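     /* Fold a 32-bit hardware counter register into a 64-bit (high/low)
      * statistic, carrying into the high word when the low-word addition
      * wraps.
      */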
6244 #define TG3_STAT_ADD32(PSTAT, REG) \
6245 do {    u32 __val = tr32(REG); \
6246         (PSTAT)->low += __val; \
6247         if ((PSTAT)->low < __val) \
6248                 (PSTAT)->high += 1; \
6249 } while (0)
6250
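     /* Called once a second from tg3_timer on TG3_FLG2_5705_PLUS chips,
      * whose statistics block is not DMA'd to host memory; accumulates the
      * MAC TX/RX counters into tp->hw_stats.
      */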
6251 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6252 {
6253         struct tg3_hw_stats *sp = tp->hw_stats;
6254
6255         if (!netif_carrier_ok(tp->dev))
6256                 return;
6257
6258         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6259         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6260         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6261         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6262         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6263         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6264         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6265         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6266         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6267         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6268         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6269         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6270         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6271
6272         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6273         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6274         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6275         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6276         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6277         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6278         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6279         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6280         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6281         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6282         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6283         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6284         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6285         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6286 }
6287
6288 static void tg3_timer(unsigned long __opaque)
6289 {
6290         struct tg3 *tp = (struct tg3 *) __opaque;
6291
6292         spin_lock(&tp->lock);
6293
6294         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6295                 /* All of this garbage is because, when using non-tagged
6296                  * IRQ status, the mailbox/status_block protocol the chip
6297                  * uses with the CPU is race prone.
6298                  */
6299                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6300                         tw32(GRC_LOCAL_CTRL,
6301                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6302                 } else {
6303                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6304                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6305                 }
6306
6307                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6308                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6309                         spin_unlock(&tp->lock);
6310                         schedule_work(&tp->reset_task);
6311                         return;
6312                 }
6313         }
6314
6315         /* This part only runs once per second. */
6316         if (!--tp->timer_counter) {
6317                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6318                         tg3_periodic_fetch_stats(tp);
6319
6320                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6321                         u32 mac_stat;
6322                         int phy_event;
6323
6324                         mac_stat = tr32(MAC_STATUS);
6325
6326                         phy_event = 0;
6327                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6328                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6329                                         phy_event = 1;
6330                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6331                                 phy_event = 1;
6332
6333                         if (phy_event)
6334                                 tg3_setup_phy(tp, 0);
6335                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6336                         u32 mac_stat = tr32(MAC_STATUS);
6337                         int need_setup = 0;
6338
6339                         if (netif_carrier_ok(tp->dev) &&
6340                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6341                                 need_setup = 1;
6342                         }
6343                         if (!netif_carrier_ok(tp->dev) &&
6344                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6345                                          MAC_STATUS_SIGNAL_DET))) {
6346                                 need_setup = 1;
6347                         }
6348                         if (need_setup) {
6349                                 tw32_f(MAC_MODE,
6350                                      (tp->mac_mode &
6351                                       ~MAC_MODE_PORT_MODE_MASK));
6352                                 udelay(40);
6353                                 tw32_f(MAC_MODE, tp->mac_mode);
6354                                 udelay(40);
6355                                 tg3_setup_phy(tp, 0);
6356                         }
6357                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6358                         tg3_serdes_parallel_detect(tp);
6359
6360                 tp->timer_counter = tp->timer_multiplier;
6361         }
6362
6363         /* Heartbeat is only sent once every 2 seconds.  */
6364         if (!--tp->asf_counter) {
6365                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6366                         u32 val;
6367
6368                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
6369                                            FWCMD_NICDRV_ALIVE2);
6370                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6371                         /* 5 seconds timeout */
6372                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
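                        /* Ring the firmware doorbell: bit 14 of
                         * GRC_RX_CPU_EVENT appears to signal the RX CPU
                         * firmware that a driver event (the heartbeat
                         * written above) is pending.
                         */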
6373                         val = tr32(GRC_RX_CPU_EVENT);
6374                         val |= (1 << 14);
6375                         tw32(GRC_RX_CPU_EVENT, val);
6376                 }
6377                 tp->asf_counter = tp->asf_multiplier;
6378         }
6379
6380         spin_unlock(&tp->lock);
6381
6382         tp->timer.expires = jiffies + tp->timer_offset;
6383         add_timer(&tp->timer);
6384 }
6385
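/* Verify that the device can actually raise an interrupt: temporarily
 * install tg3_test_isr, force an immediate host-coalescing event, and
 * poll the interrupt mailbox for up to ~50ms before restoring the normal
 * MSI or INTx handler.
 */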
6386 static int tg3_test_interrupt(struct tg3 *tp)
6387 {
6388         struct net_device *dev = tp->dev;
6389         int err, i;
6390         u32 int_mbox = 0;
6391
6392         if (!netif_running(dev))
6393                 return -ENODEV;
6394
6395         tg3_disable_ints(tp);
6396
6397         free_irq(tp->pdev->irq, dev);
6398
6399         err = request_irq(tp->pdev->irq, tg3_test_isr,
6400                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6401         if (err)
6402                 return err;
6403
6404         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6405         tg3_enable_ints(tp);
6406
6407         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6408                HOSTCC_MODE_NOW);
6409
6410         for (i = 0; i < 5; i++) {
6411                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6412                                         TG3_64BIT_REG_LOW);
6413                 if (int_mbox != 0)
6414                         break;
6415                 msleep(10);
6416         }
6417
6418         tg3_disable_ints(tp);
6419
6420         free_irq(tp->pdev->irq, dev);
6421         
6422         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6423                 err = request_irq(tp->pdev->irq, tg3_msi,
6424                                   SA_SAMPLE_RANDOM, dev->name, dev);
6425         else {
6426                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6427                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6428                         fn = tg3_interrupt_tagged;
6429                 err = request_irq(tp->pdev->irq, fn,
6430                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6431         }
6432
6433         if (err)
6434                 return err;
6435
6436         if (int_mbox != 0)
6437                 return 0;
6438
6439         return -EIO;
6440 }
6441
6442 /* Returns 0 if the MSI test succeeds, or if it fails and INTx mode is
6443  * successfully restored.
6444  */
6445 static int tg3_test_msi(struct tg3 *tp)
6446 {
6447         struct net_device *dev = tp->dev;
6448         int err;
6449         u16 pci_cmd;
6450
6451         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6452                 return 0;
6453
6454         /* Turn off SERR reporting in case MSI terminates with Master
6455          * Abort.
6456          */
6457         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6458         pci_write_config_word(tp->pdev, PCI_COMMAND,
6459                               pci_cmd & ~PCI_COMMAND_SERR);
6460
6461         err = tg3_test_interrupt(tp);
6462
6463         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6464
6465         if (!err)
6466                 return 0;
6467
6468         /* other failures */
6469         if (err != -EIO)
6470                 return err;
6471
6472         /* MSI test failed, go back to INTx mode */
6473         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6474                "switching to INTx mode. Please report this failure to "
6475                "the PCI maintainer and include system chipset information.\n",
6476                        tp->dev->name);
6477
6478         free_irq(tp->pdev->irq, dev);
6479         pci_disable_msi(tp->pdev);
6480
6481         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6482
6483         {
6484                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6485                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6486                         fn = tg3_interrupt_tagged;
6487
6488                 err = request_irq(tp->pdev->irq, fn,
6489                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6490         }
6491         if (err)
6492                 return err;
6493
6494         /* Need to reset the chip because the MSI cycle may have terminated
6495          * with Master Abort.
6496          */
6497         tg3_full_lock(tp, 1);
6498
6499         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6500         err = tg3_init_hw(tp);
6501
6502         tg3_full_unlock(tp);
6503
6504         if (err)
6505                 free_irq(tp->pdev->irq, dev);
6506
6507         return err;
6508 }
6509
6510 static int tg3_open(struct net_device *dev)
6511 {
6512         struct tg3 *tp = netdev_priv(dev);
6513         int err;
6514
6515         tg3_full_lock(tp, 0);
6516
6517         err = tg3_set_power_state(tp, PCI_D0);
6518         if (err) {
                tg3_full_unlock(tp);
6519                 return err;
        }
6520
6521         tg3_disable_ints(tp);
6522         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6523
6524         tg3_full_unlock(tp);
6525
6526         /* The placement of this call is tied
6527          * to the setup and use of Host TX descriptors.
6528          */
6529         err = tg3_alloc_consistent(tp);
6530         if (err)
6531                 return err;
6532
6533         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6534             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6535             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6536             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6537               (tp->pdev_peer == tp->pdev))) {
6538                 /* All MSI supporting chips should support tagged
6539                  * status.  Assert that this is the case.
6540                  */
6541                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6542                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6543                                "Not using MSI.\n", tp->dev->name);
6544                 } else if (pci_enable_msi(tp->pdev) == 0) {
6545                         u32 msi_mode;
6546
6547                         msi_mode = tr32(MSGINT_MODE);
6548                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6549                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6550                 }
6551         }
6552         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6553                 err = request_irq(tp->pdev->irq, tg3_msi,
6554                                   SA_SAMPLE_RANDOM, dev->name, dev);
6555         else {
6556                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6557                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6558                         fn = tg3_interrupt_tagged;
6559
6560                 err = request_irq(tp->pdev->irq, fn,
6561                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6562         }
6563
6564         if (err) {
6565                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6566                         pci_disable_msi(tp->pdev);
6567                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6568                 }
6569                 tg3_free_consistent(tp);
6570                 return err;
6571         }
6572
6573         tg3_full_lock(tp, 0);
6574
6575         err = tg3_init_hw(tp);
6576         if (err) {
6577                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6578                 tg3_free_rings(tp);
6579         } else {
6580                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6581                         tp->timer_offset = HZ;
6582                 else
6583                         tp->timer_offset = HZ / 10;
6584
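                /* The timer fires every timer_offset jiffies; the
                 * multipliers convert that rate into the once-per-second
                 * counter (timer_counter) and once-per-two-seconds counter
                 * (asf_counter) consumed by tg3_timer().
                 */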
6585                 BUG_ON(tp->timer_offset > HZ);
6586                 tp->timer_counter = tp->timer_multiplier =
6587                         (HZ / tp->timer_offset);
6588                 tp->asf_counter = tp->asf_multiplier =
6589                         ((HZ / tp->timer_offset) * 2);
6590
6591                 init_timer(&tp->timer);
6592                 tp->timer.expires = jiffies + tp->timer_offset;
6593                 tp->timer.data = (unsigned long) tp;
6594                 tp->timer.function = tg3_timer;
6595         }
6596
6597         tg3_full_unlock(tp);
6598
6599         if (err) {
6600                 free_irq(tp->pdev->irq, dev);
6601                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6602                         pci_disable_msi(tp->pdev);
6603                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6604                 }
6605                 tg3_free_consistent(tp);
6606                 return err;
6607         }
6608
6609         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6610                 err = tg3_test_msi(tp);
6611
6612                 if (err) {
6613                         tg3_full_lock(tp, 0);
6614
6615                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6616                                 pci_disable_msi(tp->pdev);
6617                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6618                         }
6619                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6620                         tg3_free_rings(tp);
6621                         tg3_free_consistent(tp);
6622
6623                         tg3_full_unlock(tp);
6624
6625                         return err;
6626                 }
6627         }
6628
6629         tg3_full_lock(tp, 0);
6630
6631         add_timer(&tp->timer);
6632         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6633         tg3_enable_ints(tp);
6634
6635         tg3_full_unlock(tp);
6636
6637         netif_start_queue(dev);
6638
6639         return 0;
6640 }
6641
6642 #if 0
6643 /*static*/ void tg3_dump_state(struct tg3 *tp)
6644 {
6645         u32 val32, val32_2, val32_3, val32_4, val32_5;
6646         u16 val16;
6647         int i;
6648
6649         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6650         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6651         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6652                val16, val32);
6653
6654         /* MAC block */
6655         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6656                tr32(MAC_MODE), tr32(MAC_STATUS));
6657         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6658                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6659         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6660                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6661         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6662                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6663
6664         /* Send data initiator control block */
6665         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6666                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6667         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6668                tr32(SNDDATAI_STATSCTRL));
6669
6670         /* Send data completion control block */
6671         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6672
6673         /* Send BD ring selector block */
6674         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6675                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6676
6677         /* Send BD initiator control block */
6678         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6679                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6680
6681         /* Send BD completion control block */
6682         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6683
6684         /* Receive list placement control block */
6685         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6686                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6687         printk("       RCVLPC_STATSCTRL[%08x]\n",
6688                tr32(RCVLPC_STATSCTRL));
6689
6690         /* Receive data and receive BD initiator control block */
6691         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6692                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6693
6694         /* Receive data completion control block */
6695         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6696                tr32(RCVDCC_MODE));
6697
6698         /* Receive BD initiator control block */
6699         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6700                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6701
6702         /* Receive BD completion control block */
6703         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6704                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6705
6706         /* Receive list selector control block */
6707         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6708                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6709
6710         /* Mbuf cluster free block */
6711         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6712                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6713
6714         /* Host coalescing control block */
6715         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6716                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6717         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6718                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6719                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6720         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6721                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6722                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6723         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6724                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6725         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6726                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6727
6728         /* Memory arbiter control block */
6729         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6730                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6731
6732         /* Buffer manager control block */
6733         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6734                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6735         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6736                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6737         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6738                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6739                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6740                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6741
6742         /* Read DMA control block */
6743         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6744                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6745
6746         /* Write DMA control block */
6747         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6748                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6749
6750         /* DMA completion block */
6751         printk("DEBUG: DMAC_MODE[%08x]\n",
6752                tr32(DMAC_MODE));
6753
6754         /* GRC block */
6755         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6756                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6757         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6758                tr32(GRC_LOCAL_CTRL));
6759
6760         /* TG3_BDINFOs */
6761         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6762                tr32(RCVDBDI_JUMBO_BD + 0x0),
6763                tr32(RCVDBDI_JUMBO_BD + 0x4),
6764                tr32(RCVDBDI_JUMBO_BD + 0x8),
6765                tr32(RCVDBDI_JUMBO_BD + 0xc));
6766         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6767                tr32(RCVDBDI_STD_BD + 0x0),
6768                tr32(RCVDBDI_STD_BD + 0x4),
6769                tr32(RCVDBDI_STD_BD + 0x8),
6770                tr32(RCVDBDI_STD_BD + 0xc));
6771         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6772                tr32(RCVDBDI_MINI_BD + 0x0),
6773                tr32(RCVDBDI_MINI_BD + 0x4),
6774                tr32(RCVDBDI_MINI_BD + 0x8),
6775                tr32(RCVDBDI_MINI_BD + 0xc));
6776
6777         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6778         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6779         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6780         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6781         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6782                val32, val32_2, val32_3, val32_4);
6783
6784         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6785         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6786         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6787         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6788         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6789                val32, val32_2, val32_3, val32_4);
6790
6791         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6792         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6793         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6794         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6795         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6796         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6797                val32, val32_2, val32_3, val32_4, val32_5);
6798
6799         /* SW status block */
6800         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6801                tp->hw_status->status,
6802                tp->hw_status->status_tag,
6803                tp->hw_status->rx_jumbo_consumer,
6804                tp->hw_status->rx_consumer,
6805                tp->hw_status->rx_mini_consumer,
6806                tp->hw_status->idx[0].rx_producer,
6807                tp->hw_status->idx[0].tx_consumer);
6808
6809         /* SW statistics block */
6810         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6811                ((u32 *)tp->hw_stats)[0],
6812                ((u32 *)tp->hw_stats)[1],
6813                ((u32 *)tp->hw_stats)[2],
6814                ((u32 *)tp->hw_stats)[3]);
6815
6816         /* Mailboxes */
6817         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6818                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6819                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6820                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6821                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6822
6823         /* NIC side send descriptors. */
6824         for (i = 0; i < 6; i++) {
6825                 unsigned long txd;
6826
6827                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6828                         + (i * sizeof(struct tg3_tx_buffer_desc));
6829                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6830                        i,
6831                        readl(txd + 0x0), readl(txd + 0x4),
6832                        readl(txd + 0x8), readl(txd + 0xc));
6833         }
6834
6835         /* NIC side RX descriptors. */
6836         for (i = 0; i < 6; i++) {
6837                 unsigned long rxd;
6838
6839                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6840                         + (i * sizeof(struct tg3_rx_buffer_desc));
6841                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6842                        i,
6843                        readl(rxd + 0x0), readl(rxd + 0x4),
6844                        readl(rxd + 0x8), readl(rxd + 0xc));
6845                 rxd += (4 * sizeof(u32));
6846                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6847                        i,
6848                        readl(rxd + 0x0), readl(rxd + 0x4),
6849                        readl(rxd + 0x8), readl(rxd + 0xc));
6850         }
6851
6852         for (i = 0; i < 6; i++) {
6853                 unsigned long rxd;
6854
6855                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6856                         + (i * sizeof(struct tg3_rx_buffer_desc));
6857                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6858                        i,
6859                        readl(rxd + 0x0), readl(rxd + 0x4),
6860                        readl(rxd + 0x8), readl(rxd + 0xc));
6861                 rxd += (4 * sizeof(u32));
6862                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6863                        i,
6864                        readl(rxd + 0x0), readl(rxd + 0x4),
6865                        readl(rxd + 0x8), readl(rxd + 0xc));
6866         }
6867 }
6868 #endif
6869
6870 static struct net_device_stats *tg3_get_stats(struct net_device *);
6871 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6872
6873 static int tg3_close(struct net_device *dev)
6874 {
6875         struct tg3 *tp = netdev_priv(dev);
6876
6877         /* Calling flush_scheduled_work() may deadlock because
6878          * linkwatch_event() may be on the workqueue and it will try to get
6879          * the rtnl_lock which we are holding.
6880          */
6881         while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
6882                 msleep(1);
6883
6884         netif_stop_queue(dev);
6885
6886         del_timer_sync(&tp->timer);
6887
6888         tg3_full_lock(tp, 1);
6889 #if 0
6890         tg3_dump_state(tp);
6891 #endif
6892
6893         tg3_disable_ints(tp);
6894
6895         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6896         tg3_free_rings(tp);
6897         tp->tg3_flags &=
6898                 ~(TG3_FLAG_INIT_COMPLETE |
6899                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6900
6901         tg3_full_unlock(tp);
6902
6903         free_irq(tp->pdev->irq, dev);
6904         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6905                 pci_disable_msi(tp->pdev);
6906                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6907         }
6908
6909         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6910                sizeof(tp->net_stats_prev));
6911         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6912                sizeof(tp->estats_prev));
6913
6914         tg3_free_consistent(tp);
6915
6916         tg3_set_power_state(tp, PCI_D3hot);
6917
6918         netif_carrier_off(tp->dev);
6919
6920         return 0;
6921 }
6922
6923 static inline unsigned long get_stat64(tg3_stat64_t *val)
6924 {
6925         unsigned long ret;
6926
6927 #if (BITS_PER_LONG == 32)
6928         ret = val->low;
6929 #else
6930         ret = ((u64)val->high << 32) | ((u64)val->low);
6931 #endif
6932         return ret;
6933 }
6934
6935 static unsigned long calc_crc_errors(struct tg3 *tp)
6936 {
6937         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6938
6939         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6940             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6941              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6942                 u32 val;
6943
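                /* On 5700/5701 with a copper PHY, CRC errors are read from
                 * the PHY rather than the MAC statistics block: register
                 * 0x1e appears to be a PHY test register (bit 15 enables
                 * the counter) and register 0x14 returns the count.
                 */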
6944                 spin_lock_bh(&tp->lock);
6945                 if (!tg3_readphy(tp, 0x1e, &val)) {
6946                         tg3_writephy(tp, 0x1e, val | 0x8000);
6947                         tg3_readphy(tp, 0x14, &val);
6948                 } else
6949                         val = 0;
6950                 spin_unlock_bh(&tp->lock);
6951
6952                 tp->phy_crc_errors += val;
6953
6954                 return tp->phy_crc_errors;
6955         }
6956
6957         return get_stat64(&hw_stats->rx_fcs_errors);
6958 }
6959
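/* Fold a hardware counter into the cumulative ethtool statistic.
 * estats_prev holds the totals saved at the last tg3_close(), so the
 * reported numbers keep growing across down/up cycles of the interface.
 */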
6960 #define ESTAT_ADD(member) \
6961         estats->member =        old_estats->member + \
6962                                 get_stat64(&hw_stats->member)
6963
6964 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6965 {
6966         struct tg3_ethtool_stats *estats = &tp->estats;
6967         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6968         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6969
6970         if (!hw_stats)
6971                 return old_estats;
6972
6973         ESTAT_ADD(rx_octets);
6974         ESTAT_ADD(rx_fragments);
6975         ESTAT_ADD(rx_ucast_packets);
6976         ESTAT_ADD(rx_mcast_packets);
6977         ESTAT_ADD(rx_bcast_packets);
6978         ESTAT_ADD(rx_fcs_errors);
6979         ESTAT_ADD(rx_align_errors);
6980         ESTAT_ADD(rx_xon_pause_rcvd);
6981         ESTAT_ADD(rx_xoff_pause_rcvd);
6982         ESTAT_ADD(rx_mac_ctrl_rcvd);
6983         ESTAT_ADD(rx_xoff_entered);
6984         ESTAT_ADD(rx_frame_too_long_errors);
6985         ESTAT_ADD(rx_jabbers);
6986         ESTAT_ADD(rx_undersize_packets);
6987         ESTAT_ADD(rx_in_length_errors);
6988         ESTAT_ADD(rx_out_length_errors);
6989         ESTAT_ADD(rx_64_or_less_octet_packets);
6990         ESTAT_ADD(rx_65_to_127_octet_packets);
6991         ESTAT_ADD(rx_128_to_255_octet_packets);
6992         ESTAT_ADD(rx_256_to_511_octet_packets);
6993         ESTAT_ADD(rx_512_to_1023_octet_packets);
6994         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6995         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6996         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6997         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6998         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6999
7000         ESTAT_ADD(tx_octets);
7001         ESTAT_ADD(tx_collisions);
7002         ESTAT_ADD(tx_xon_sent);
7003         ESTAT_ADD(tx_xoff_sent);
7004         ESTAT_ADD(tx_flow_control);
7005         ESTAT_ADD(tx_mac_errors);
7006         ESTAT_ADD(tx_single_collisions);
7007         ESTAT_ADD(tx_mult_collisions);
7008         ESTAT_ADD(tx_deferred);
7009         ESTAT_ADD(tx_excessive_collisions);
7010         ESTAT_ADD(tx_late_collisions);
7011         ESTAT_ADD(tx_collide_2times);
7012         ESTAT_ADD(tx_collide_3times);
7013         ESTAT_ADD(tx_collide_4times);
7014         ESTAT_ADD(tx_collide_5times);
7015         ESTAT_ADD(tx_collide_6times);
7016         ESTAT_ADD(tx_collide_7times);
7017         ESTAT_ADD(tx_collide_8times);
7018         ESTAT_ADD(tx_collide_9times);
7019         ESTAT_ADD(tx_collide_10times);
7020         ESTAT_ADD(tx_collide_11times);
7021         ESTAT_ADD(tx_collide_12times);
7022         ESTAT_ADD(tx_collide_13times);
7023         ESTAT_ADD(tx_collide_14times);
7024         ESTAT_ADD(tx_collide_15times);
7025         ESTAT_ADD(tx_ucast_packets);
7026         ESTAT_ADD(tx_mcast_packets);
7027         ESTAT_ADD(tx_bcast_packets);
7028         ESTAT_ADD(tx_carrier_sense_errors);
7029         ESTAT_ADD(tx_discards);
7030         ESTAT_ADD(tx_errors);
7031
7032         ESTAT_ADD(dma_writeq_full);
7033         ESTAT_ADD(dma_write_prioq_full);
7034         ESTAT_ADD(rxbds_empty);
7035         ESTAT_ADD(rx_discards);
7036         ESTAT_ADD(rx_errors);
7037         ESTAT_ADD(rx_threshold_hit);
7038
7039         ESTAT_ADD(dma_readq_full);
7040         ESTAT_ADD(dma_read_prioq_full);
7041         ESTAT_ADD(tx_comp_queue_full);
7042
7043         ESTAT_ADD(ring_set_send_prod_index);
7044         ESTAT_ADD(ring_status_update);
7045         ESTAT_ADD(nic_irqs);
7046         ESTAT_ADD(nic_avoided_irqs);
7047         ESTAT_ADD(nic_tx_threshold_hit);
7048
7049         return estats;
7050 }
7051
7052 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7053 {
7054         struct tg3 *tp = netdev_priv(dev);
7055         struct net_device_stats *stats = &tp->net_stats;
7056         struct net_device_stats *old_stats = &tp->net_stats_prev;
7057         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7058
7059         if (!hw_stats)
7060                 return old_stats;
7061
7062         stats->rx_packets = old_stats->rx_packets +
7063                 get_stat64(&hw_stats->rx_ucast_packets) +
7064                 get_stat64(&hw_stats->rx_mcast_packets) +
7065                 get_stat64(&hw_stats->rx_bcast_packets);
7066                 
7067         stats->tx_packets = old_stats->tx_packets +
7068                 get_stat64(&hw_stats->tx_ucast_packets) +
7069                 get_stat64(&hw_stats->tx_mcast_packets) +
7070                 get_stat64(&hw_stats->tx_bcast_packets);
7071
7072         stats->rx_bytes = old_stats->rx_bytes +
7073                 get_stat64(&hw_stats->rx_octets);
7074         stats->tx_bytes = old_stats->tx_bytes +
7075                 get_stat64(&hw_stats->tx_octets);
7076
7077         stats->rx_errors = old_stats->rx_errors +
7078                 get_stat64(&hw_stats->rx_errors);
7079         stats->tx_errors = old_stats->tx_errors +
7080                 get_stat64(&hw_stats->tx_errors) +
7081                 get_stat64(&hw_stats->tx_mac_errors) +
7082                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7083                 get_stat64(&hw_stats->tx_discards);
7084
7085         stats->multicast = old_stats->multicast +
7086                 get_stat64(&hw_stats->rx_mcast_packets);
7087         stats->collisions = old_stats->collisions +
7088                 get_stat64(&hw_stats->tx_collisions);
7089
7090         stats->rx_length_errors = old_stats->rx_length_errors +
7091                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7092                 get_stat64(&hw_stats->rx_undersize_packets);
7093
7094         stats->rx_over_errors = old_stats->rx_over_errors +
7095                 get_stat64(&hw_stats->rxbds_empty);
7096         stats->rx_frame_errors = old_stats->rx_frame_errors +
7097                 get_stat64(&hw_stats->rx_align_errors);
7098         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7099                 get_stat64(&hw_stats->tx_discards);
7100         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7101                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7102
7103         stats->rx_crc_errors = old_stats->rx_crc_errors +
7104                 calc_crc_errors(tp);
7105
7106         stats->rx_missed_errors = old_stats->rx_missed_errors +
7107                 get_stat64(&hw_stats->rx_discards);
7108
7109         return stats;
7110 }
7111
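/* Bitwise CRC-32 over 'len' bytes using the standard Ethernet polynomial
 * in reflected form (0xedb88320).  Used below to derive the multicast
 * hash filter bits from each multicast MAC address.
 */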
7112 static inline u32 calc_crc(unsigned char *buf, int len)
7113 {
7114         u32 reg;
7115         u32 tmp;
7116         int j, k;
7117
7118         reg = 0xffffffff;
7119
7120         for (j = 0; j < len; j++) {
7121                 reg ^= buf[j];
7122
7123                 for (k = 0; k < 8; k++) {
7124                         tmp = reg & 0x01;
7125
7126                         reg >>= 1;
7127
7128                         if (tmp) {
7129                                 reg ^= 0xedb88320;
7130                         }
7131                 }
7132         }
7133
7134         return ~reg;
7135 }
7136
7137 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7138 {
7139         /* accept or reject all multicast frames */
7140         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7141         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7142         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7143         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7144 }
7145
7146 static void __tg3_set_rx_mode(struct net_device *dev)
7147 {
7148         struct tg3 *tp = netdev_priv(dev);
7149         u32 rx_mode;
7150
7151         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7152                                   RX_MODE_KEEP_VLAN_TAG);
7153
7154         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7155          * flag clear.
7156          */
7157 #if TG3_VLAN_TAG_USED
7158         if (!tp->vlgrp &&
7159             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7160                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7161 #else
7162         /* By definition, VLAN is always disabled in this
7163          * case.
7164          */
7165         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7166                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7167 #endif
7168
7169         if (dev->flags & IFF_PROMISC) {
7170                 /* Promiscuous mode. */
7171                 rx_mode |= RX_MODE_PROMISC;
7172         } else if (dev->flags & IFF_ALLMULTI) {
7173                 /* Accept all multicast. */
7174                 tg3_set_multi(tp, 1);
7175         } else if (dev->mc_count < 1) {
7176                 /* Reject all multicast. */
7177                 tg3_set_multi(tp, 0);
7178         } else {
7179                 /* Accept one or more multicast(s). */
7180                 struct dev_mc_list *mclist;
7181                 unsigned int i;
7182                 u32 mc_filter[4] = { 0, };
7183                 u32 regidx;
7184                 u32 bit;
7185                 u32 crc;
7186
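                /* Hash each address into one of 128 filter bits: the low
                 * 7 bits of the inverted CRC select the bit, with bits 6:5
                 * choosing one of the four 32-bit MAC_HASH registers and
                 * bits 4:0 the position within that register.
                 */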
7187                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7188                      i++, mclist = mclist->next) {
7189
7190                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
7191                         bit = ~crc & 0x7f;
7192                         regidx = (bit & 0x60) >> 5;
7193                         bit &= 0x1f;
7194                         mc_filter[regidx] |= (1 << bit);
7195                 }
7196
7197                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7198                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7199                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7200                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7201         }
7202
7203         if (rx_mode != tp->rx_mode) {
7204                 tp->rx_mode = rx_mode;
7205                 tw32_f(MAC_RX_MODE, rx_mode);
7206                 udelay(10);
7207         }
7208 }
7209
7210 static void tg3_set_rx_mode(struct net_device *dev)
7211 {
7212         struct tg3 *tp = netdev_priv(dev);
7213
7214         if (!netif_running(dev))
7215                 return;
7216
7217         tg3_full_lock(tp, 0);
7218         __tg3_set_rx_mode(dev);
7219         tg3_full_unlock(tp);
7220 }
7221
7222 #define TG3_REGDUMP_LEN         (32 * 1024)
7223
7224 static int tg3_get_regs_len(struct net_device *dev)
7225 {
7226         return TG3_REGDUMP_LEN;
7227 }
7228
7229 static void tg3_get_regs(struct net_device *dev,
7230                 struct ethtool_regs *regs, void *_p)
7231 {
7232         u32 *p = _p;
7233         struct tg3 *tp = netdev_priv(dev);
7234         u8 *orig_p = _p;
7235         int i;
7236
7237         regs->version = 0;
7238
7239         memset(p, 0, TG3_REGDUMP_LEN);
7240
7241         if (tp->link_config.phy_is_low_power)
7242                 return;
7243
7244         tg3_full_lock(tp, 0);
7245
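/* Helpers for building the register dump: each register block is copied
 * into the output buffer at an offset equal to its register address, so
 * regions that are not read remain zero (see the memset above).
 */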
7246 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7247 #define GET_REG32_LOOP(base,len)                \
7248 do {    p = (u32 *)(orig_p + (base));           \
7249         for (i = 0; i < len; i += 4)            \
7250                 __GET_REG32((base) + i);        \
7251 } while (0)
7252 #define GET_REG32_1(reg)                        \
7253 do {    p = (u32 *)(orig_p + (reg));            \
7254         __GET_REG32((reg));                     \
7255 } while (0)
7256
7257         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7258         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7259         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7260         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7261         GET_REG32_1(SNDDATAC_MODE);
7262         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7263         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7264         GET_REG32_1(SNDBDC_MODE);
7265         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7266         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7267         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7268         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7269         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7270         GET_REG32_1(RCVDCC_MODE);
7271         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7272         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7273         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7274         GET_REG32_1(MBFREE_MODE);
7275         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7276         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7277         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7278         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7279         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7280         GET_REG32_1(RX_CPU_MODE);
7281         GET_REG32_1(RX_CPU_STATE);
7282         GET_REG32_1(RX_CPU_PGMCTR);
7283         GET_REG32_1(RX_CPU_HWBKPT);
7284         GET_REG32_1(TX_CPU_MODE);
7285         GET_REG32_1(TX_CPU_STATE);
7286         GET_REG32_1(TX_CPU_PGMCTR);
7287         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7288         GET_REG32_LOOP(FTQ_RESET, 0x120);
7289         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7290         GET_REG32_1(DMAC_MODE);
7291         GET_REG32_LOOP(GRC_MODE, 0x4c);
7292         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7293                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7294
7295 #undef __GET_REG32
7296 #undef GET_REG32_LOOP
7297 #undef GET_REG32_1
7298
7299         tg3_full_unlock(tp);
7300 }
7301
7302 static int tg3_get_eeprom_len(struct net_device *dev)
7303 {
7304         struct tg3 *tp = netdev_priv(dev);
7305
7306         return tp->nvram_size;
7307 }
7308
7309 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7310
7311 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7312 {
7313         struct tg3 *tp = netdev_priv(dev);
7314         int ret;
7315         u8  *pd;
7316         u32 i, offset, len, val, b_offset, b_count;
7317
7318         if (tp->link_config.phy_is_low_power)
7319                 return -EAGAIN;
7320
7321         offset = eeprom->offset;
7322         len = eeprom->len;
7323         eeprom->len = 0;
7324
7325         eeprom->magic = TG3_EEPROM_MAGIC;
7326
7327         if (offset & 3) {
7328                 /* adjustments to start on required 4 byte boundary */
7329                 b_offset = offset & 3;
7330                 b_count = 4 - b_offset;
7331                 if (b_count > len) {
7332                         /* i.e. offset=1 len=2 */
7333                         b_count = len;
7334                 }
7335                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7336                 if (ret)
7337                         return ret;
7338                 val = cpu_to_le32(val);
7339                 memcpy(data, ((char*)&val) + b_offset, b_count);
7340                 len -= b_count;
7341                 offset += b_count;
7342                 eeprom->len += b_count;
7343         }
7344
7345         /* read bytes up to the last 4 byte boundary */
7346         pd = &data[eeprom->len];
7347         for (i = 0; i < (len - (len & 3)); i += 4) {
7348                 ret = tg3_nvram_read(tp, offset + i, &val);
7349                 if (ret) {
7350                         eeprom->len += i;
7351                         return ret;
7352                 }
7353                 val = cpu_to_le32(val);
7354                 memcpy(pd + i, &val, 4);
7355         }
7356         eeprom->len += i;
7357
7358         if (len & 3) {
7359                 /* read last bytes not ending on 4 byte boundary */
7360                 pd = &data[eeprom->len];
7361                 b_count = len & 3;
7362                 b_offset = offset + len - b_count;
7363                 ret = tg3_nvram_read(tp, b_offset, &val);
7364                 if (ret)
7365                         return ret;
7366                 val = cpu_to_le32(val);
7367                 memcpy(pd, ((char*)&val), b_count);
7368                 eeprom->len += b_count;
7369         }
7370         return 0;
7371 }
7372
7373 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7374
7375 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7376 {
7377         struct tg3 *tp = netdev_priv(dev);
7378         int ret;
7379         u32 offset, len, b_offset, odd_len, start, end;
7380         u8 *buf;
7381
7382         if (tp->link_config.phy_is_low_power)
7383                 return -EAGAIN;
7384
7385         if (eeprom->magic != TG3_EEPROM_MAGIC)
7386                 return -EINVAL;
7387
7388         offset = eeprom->offset;
7389         len = eeprom->len;
7390
7391         if ((b_offset = (offset & 3))) {
7392                 /* adjustments to start on required 4 byte boundary */
7393                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7394                 if (ret)
7395                         return ret;
7396                 start = cpu_to_le32(start);
7397                 len += b_offset;
7398                 offset &= ~3;
7399                 if (len < 4)
7400                         len = 4;
7401         }
7402
7403         odd_len = 0;
7404         if (len & 3) {
7405                 /* adjustments to end on required 4 byte boundary */
7406                 odd_len = 1;
7407                 len = (len + 3) & ~3;
7408                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7409                 if (ret)
7410                         return ret;
7411                 end = cpu_to_le32(end);
7412         }
7413
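        /* If the request is not 4-byte aligned at either end, build a
         * bounce buffer padded with the NVRAM words preserved above so
         * that the block write below stays word aligned.
         */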
7414         buf = data;
7415         if (b_offset || odd_len) {
7416                 buf = kmalloc(len, GFP_KERNEL);
7417                 if (!buf)
7418                         return -ENOMEM;
7419                 if (b_offset)
7420                         memcpy(buf, &start, 4);
7421                 if (odd_len)
7422                         memcpy(buf+len-4, &end, 4);
7423                 memcpy(buf + b_offset, data, eeprom->len);
7424         }
7425
7426         ret = tg3_nvram_write_block(tp, offset, len, buf);
7427
7428         if (buf != data)
7429                 kfree(buf);
7430
7431         return ret;
7432 }
7433
7434 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7435 {
7436         struct tg3 *tp = netdev_priv(dev);
7437   
7438         cmd->supported = (SUPPORTED_Autoneg);
7439
7440         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7441                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7442                                    SUPPORTED_1000baseT_Full);
7443
7444         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
7445                 cmd->supported |= (SUPPORTED_100baseT_Half |
7446                                   SUPPORTED_100baseT_Full |
7447                                   SUPPORTED_10baseT_Half |
7448                                   SUPPORTED_10baseT_Full |
7449                                   SUPPORTED_MII);
7450         else
7451                 cmd->supported |= SUPPORTED_FIBRE;
7452   
7453         cmd->advertising = tp->link_config.advertising;
7454         if (netif_running(dev)) {
7455                 cmd->speed = tp->link_config.active_speed;
7456                 cmd->duplex = tp->link_config.active_duplex;
7457         }
7458         cmd->port = 0;
7459         cmd->phy_address = PHY_ADDR;
7460         cmd->transceiver = 0;
7461         cmd->autoneg = tp->link_config.autoneg;
7462         cmd->maxtxpkt = 0;
7463         cmd->maxrxpkt = 0;
7464         return 0;
7465 }
7466   
7467 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7468 {
7469         struct tg3 *tp = netdev_priv(dev);
7470   
7471         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7472                 /* These are the only valid advertisement bits.  */
7473                 if (cmd->autoneg == AUTONEG_ENABLE &&
7474                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7475                                           ADVERTISED_1000baseT_Full |
7476                                           ADVERTISED_Autoneg |
7477                                           ADVERTISED_FIBRE)))
7478                         return -EINVAL;
7479                 /* Fiber can only do SPEED_1000.  */
7480                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7481                          (cmd->speed != SPEED_1000))
7482                         return -EINVAL;
7483         /* Copper cannot force SPEED_1000.  */
7484         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7485                    (cmd->speed == SPEED_1000))
7486                 return -EINVAL;
7487         else if ((cmd->speed == SPEED_1000) &&
7488                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7489                 return -EINVAL;
7490
7491         tg3_full_lock(tp, 0);
7492
7493         tp->link_config.autoneg = cmd->autoneg;
7494         if (cmd->autoneg == AUTONEG_ENABLE) {
7495                 tp->link_config.advertising = cmd->advertising;
7496                 tp->link_config.speed = SPEED_INVALID;
7497                 tp->link_config.duplex = DUPLEX_INVALID;
7498         } else {
7499                 tp->link_config.advertising = 0;
7500                 tp->link_config.speed = cmd->speed;
7501                 tp->link_config.duplex = cmd->duplex;
7502         }
7503   
7504         if (netif_running(dev))
7505                 tg3_setup_phy(tp, 1);
7506
7507         tg3_full_unlock(tp);
7508   
7509         return 0;
7510 }
7511   
7512 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7513 {
7514         struct tg3 *tp = netdev_priv(dev);
7515   
7516         strcpy(info->driver, DRV_MODULE_NAME);
7517         strcpy(info->version, DRV_MODULE_VERSION);
7518         strcpy(info->bus_info, pci_name(tp->pdev));
7519 }
7520   
7521 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7522 {
7523         struct tg3 *tp = netdev_priv(dev);
7524   
7525         wol->supported = WAKE_MAGIC;
7526         wol->wolopts = 0;
7527         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7528                 wol->wolopts = WAKE_MAGIC;
7529         memset(&wol->sopass, 0, sizeof(wol->sopass));
7530 }
7531   
7532 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7533 {
7534         struct tg3 *tp = netdev_priv(dev);
7535   
7536         if (wol->wolopts & ~WAKE_MAGIC)
7537                 return -EINVAL;
7538         if ((wol->wolopts & WAKE_MAGIC) &&
7539             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7540             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7541                 return -EINVAL;
7542   
7543         spin_lock_bh(&tp->lock);
7544         if (wol->wolopts & WAKE_MAGIC)
7545                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7546         else
7547                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7548         spin_unlock_bh(&tp->lock);
7549   
7550         return 0;
7551 }
7552   
7553 static u32 tg3_get_msglevel(struct net_device *dev)
7554 {
7555         struct tg3 *tp = netdev_priv(dev);
7556         return tp->msg_enable;
7557 }
7558   
7559 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7560 {
7561         struct tg3 *tp = netdev_priv(dev);
7562         tp->msg_enable = value;
7563 }
7564   
7565 #if TG3_TSO_SUPPORT != 0
7566 static int tg3_set_tso(struct net_device *dev, u32 value)
7567 {
7568         struct tg3 *tp = netdev_priv(dev);
7569
7570         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7571                 if (value)
7572                         return -EINVAL;
7573                 return 0;
7574         }
7575         return ethtool_op_set_tso(dev, value);
7576 }
7577 #endif
7578   
7579 static int tg3_nway_reset(struct net_device *dev)
7580 {
7581         struct tg3 *tp = netdev_priv(dev);
7582         u32 bmcr;
7583         int r;
7584   
7585         if (!netif_running(dev))
7586                 return -EAGAIN;
7587
7588         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7589                 return -EINVAL;
7590
7591         spin_lock_bh(&tp->lock);
7592         r = -EINVAL;
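        /* BMCR is read twice on purpose: the first result is discarded
         * (presumably to flush a stale/latched value) and only the second
         * read below is checked.
         */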
7593         tg3_readphy(tp, MII_BMCR, &bmcr);
7594         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7595             ((bmcr & BMCR_ANENABLE) ||
7596              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7597                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7598                                            BMCR_ANENABLE);
7599                 r = 0;
7600         }
7601         spin_unlock_bh(&tp->lock);
7602   
7603         return r;
7604 }
7605   
7606 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7607 {
7608         struct tg3 *tp = netdev_priv(dev);
7609   
7610         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7611         ering->rx_mini_max_pending = 0;
7612         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7613                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7614         else
7615                 ering->rx_jumbo_max_pending = 0;
7616
7617         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7618
7619         ering->rx_pending = tp->rx_pending;
7620         ering->rx_mini_pending = 0;
7621         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7622                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7623         else
7624                 ering->rx_jumbo_pending = 0;
7625
7626         ering->tx_pending = tp->tx_pending;
7627 }
7628   
7629 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7630 {
7631         struct tg3 *tp = netdev_priv(dev);
7632         int irq_sync = 0;
7633   
7634         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7635             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7636             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7637                 return -EINVAL;
7638   
7639         if (netif_running(dev)) {
7640                 tg3_netif_stop(tp);
7641                 irq_sync = 1;
7642         }
7643
7644         tg3_full_lock(tp, irq_sync);
7645   
7646         tp->rx_pending = ering->rx_pending;
7647
7648         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7649             tp->rx_pending > 63)
7650                 tp->rx_pending = 63;
7651         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7652         tp->tx_pending = ering->tx_pending;
7653
7654         if (netif_running(dev)) {
7655                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7656                 tg3_init_hw(tp);
7657                 tg3_netif_start(tp);
7658         }
7659
7660         tg3_full_unlock(tp);
7661   
7662         return 0;
7663 }
7664   
7665 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7666 {
7667         struct tg3 *tp = netdev_priv(dev);
7668   
7669         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7670         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7671         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7672 }
7673   
7674 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7675 {
7676         struct tg3 *tp = netdev_priv(dev);
7677         int irq_sync = 0;
7678   
7679         if (netif_running(dev)) {
7680                 tg3_netif_stop(tp);
7681                 irq_sync = 1;
7682         }
7683
7684         tg3_full_lock(tp, irq_sync);
7685
7686         if (epause->autoneg)
7687                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7688         else
7689                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7690         if (epause->rx_pause)
7691                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7692         else
7693                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7694         if (epause->tx_pause)
7695                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7696         else
7697                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7698
7699         if (netif_running(dev)) {
7700                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7701                 tg3_init_hw(tp);
7702                 tg3_netif_start(tp);
7703         }
7704
7705         tg3_full_unlock(tp);
7706   
7707         return 0;
7708 }
7709   
7710 static u32 tg3_get_rx_csum(struct net_device *dev)
7711 {
7712         struct tg3 *tp = netdev_priv(dev);
7713         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7714 }
7715   
7716 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7717 {
7718         struct tg3 *tp = netdev_priv(dev);
7719   
7720         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7721                 if (data != 0)
7722                         return -EINVAL;
7723                 return 0;
7724         }
7725   
7726         spin_lock_bh(&tp->lock);
7727         if (data)
7728                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7729         else
7730                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7731         spin_unlock_bh(&tp->lock);
7732   
7733         return 0;
7734 }
7735   
7736 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7737 {
7738         struct tg3 *tp = netdev_priv(dev);
7739   
7740         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7741                 if (data != 0)
7742                         return -EINVAL;
7743                 return 0;
7744         }
7745   
7746         if (data)
7747                 dev->features |= NETIF_F_IP_CSUM;
7748         else
7749                 dev->features &= ~NETIF_F_IP_CSUM;
7750
7751         return 0;
7752 }
7753
7754 static int tg3_get_stats_count (struct net_device *dev)
7755 {
7756         return TG3_NUM_STATS;
7757 }
7758
7759 static int tg3_get_test_count (struct net_device *dev)
7760 {
7761         return TG3_NUM_TEST;
7762 }
7763
7764 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7765 {
7766         switch (stringset) {
7767         case ETH_SS_STATS:
7768                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7769                 break;
7770         case ETH_SS_TEST:
7771                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7772                 break;
7773         default:
7774                 WARN_ON(1);     /* we need a WARN() */
7775                 break;
7776         }
7777 }
7778
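/* Identify the adapter by blinking its LEDs for 'data' seconds
 * (0 selects the 2 second default), then restore the saved
 * MAC_LED_CTRL value.
 */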
7779 static int tg3_phys_id(struct net_device *dev, u32 data)
7780 {
7781         struct tg3 *tp = netdev_priv(dev);
7782         int i;
7783
7784         if (!netif_running(tp->dev))
7785                 return -EAGAIN;
7786
7787         if (data == 0)
7788                 data = 2;
7789
7790         for (i = 0; i < (data * 2); i++) {
7791                 if ((i % 2) == 0)
7792                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7793                                            LED_CTRL_1000MBPS_ON |
7794                                            LED_CTRL_100MBPS_ON |
7795                                            LED_CTRL_10MBPS_ON |
7796                                            LED_CTRL_TRAFFIC_OVERRIDE |
7797                                            LED_CTRL_TRAFFIC_BLINK |
7798                                            LED_CTRL_TRAFFIC_LED);
7799         
7800                 else
7801                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7802                                            LED_CTRL_TRAFFIC_OVERRIDE);
7803
7804                 if (msleep_interruptible(500))
7805                         break;
7806         }
7807         tw32(MAC_LED_CTRL, tp->led_ctrl);
7808         return 0;
7809 }
7810
7811 static void tg3_get_ethtool_stats (struct net_device *dev,
7812                                    struct ethtool_stats *estats, u64 *tmp_stats)
7813 {
7814         struct tg3 *tp = netdev_priv(dev);
7815         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7816 }
7817
7818 #define NVRAM_TEST_SIZE 0x100
7819 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
7820
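/* Read back the NVRAM image and verify it: self-boot images are
 * checked with a simple byte checksum, legacy images with the
 * bootstrap CRC at offset 0x10 and the manufacturing-block CRC
 * at offset 0xfc.
 */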
7821 static int tg3_test_nvram(struct tg3 *tp)
7822 {
7823         u32 *buf, csum, magic;
7824         int i, j, err = 0, size;
7825
7826         if (tg3_nvram_read(tp, 0, &magic) != 0)
7827                 return -EIO;
7828
7829         magic = swab32(magic);
7830         if (magic == TG3_EEPROM_MAGIC)
7831                 size = NVRAM_TEST_SIZE;
7832         else if ((magic & 0xff000000) == 0xa5000000) {
7833                 if ((magic & 0xe00000) == 0x200000)
7834                         size = NVRAM_SELFBOOT_FORMAT1_SIZE;
7835                 else
7836                         return 0;
7837         } else
7838                 return -EIO;
7839
7840         buf = kmalloc(size, GFP_KERNEL);
7841         if (buf == NULL)
7842                 return -ENOMEM;
7843
7844         err = -EIO;
7845         for (i = 0, j = 0; i < size; i += 4, j++) {
7846                 u32 val;
7847
7848                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7849                         break;
7850                 buf[j] = cpu_to_le32(val);
7851         }
7852         if (i < size)
7853                 goto out;
7854
7855         /* Selfboot format */
7856         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
7857                 u8 *buf8 = (u8 *) buf, csum8 = 0;
7858
7859                 for (i = 0; i < size; i++)
7860                         csum8 += buf8[i];
7861
7862                 if (csum8 == 0)
7863                         return 0;
7864                 return -EIO;
7865         }
7866
7867         /* Bootstrap checksum at offset 0x10 */
7868         csum = calc_crc((unsigned char *) buf, 0x10);
7869         if (csum != cpu_to_le32(buf[0x10/4]))
7870                 goto out;
7871
7872         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7873         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7874         if (csum != cpu_to_le32(buf[0xfc/4]))
7875                 goto out;
7876
7877         err = 0;
7878
7879 out:
7880         kfree(buf);
7881         return err;
7882 }
7883
7884 #define TG3_SERDES_TIMEOUT_SEC  2
7885 #define TG3_COPPER_TIMEOUT_SEC  6
7886
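/* Poll for link-up, allowing up to 2 seconds on SerDes devices and
 * 6 seconds on copper before declaring the link test failed.
 */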
7887 static int tg3_test_link(struct tg3 *tp)
7888 {
7889         int i, max;
7890
7891         if (!netif_running(tp->dev))
7892                 return -ENODEV;
7893
7894         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
7895                 max = TG3_SERDES_TIMEOUT_SEC;
7896         else
7897                 max = TG3_COPPER_TIMEOUT_SEC;
7898
7899         for (i = 0; i < max; i++) {
7900                 if (netif_carrier_ok(tp->dev))
7901                         return 0;
7902
7903                 if (msleep_interruptible(1000))
7904                         break;
7905         }
7906
7907         return -EIO;
7908 }
7909
7910 /* Only test the commonly used registers */
7911 static int tg3_test_registers(struct tg3 *tp)
7912 {
7913         int i, is_5705;
7914         u32 offset, read_mask, write_mask, val, save_val, read_val;
7915         static struct {
7916                 u16 offset;
7917                 u16 flags;
7918 #define TG3_FL_5705     0x1
7919 #define TG3_FL_NOT_5705 0x2
7920 #define TG3_FL_NOT_5788 0x4
7921                 u32 read_mask;
7922                 u32 write_mask;
7923         } reg_tbl[] = {
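                /* Each entry: register offset, chip-applicability
                 * flags, read-only bit mask, read/write bit mask.
                 */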
7924                 /* MAC Control Registers */
7925                 { MAC_MODE, TG3_FL_NOT_5705,
7926                         0x00000000, 0x00ef6f8c },
7927                 { MAC_MODE, TG3_FL_5705,
7928                         0x00000000, 0x01ef6b8c },
7929                 { MAC_STATUS, TG3_FL_NOT_5705,
7930                         0x03800107, 0x00000000 },
7931                 { MAC_STATUS, TG3_FL_5705,
7932                         0x03800100, 0x00000000 },
7933                 { MAC_ADDR_0_HIGH, 0x0000,
7934                         0x00000000, 0x0000ffff },
7935                 { MAC_ADDR_0_LOW, 0x0000,
7936                         0x00000000, 0xffffffff },
7937                 { MAC_RX_MTU_SIZE, 0x0000,
7938                         0x00000000, 0x0000ffff },
7939                 { MAC_TX_MODE, 0x0000,
7940                         0x00000000, 0x00000070 },
7941                 { MAC_TX_LENGTHS, 0x0000,
7942                         0x00000000, 0x00003fff },
7943                 { MAC_RX_MODE, TG3_FL_NOT_5705,
7944                         0x00000000, 0x000007fc },
7945                 { MAC_RX_MODE, TG3_FL_5705,
7946                         0x00000000, 0x000007dc },
7947                 { MAC_HASH_REG_0, 0x0000,
7948                         0x00000000, 0xffffffff },
7949                 { MAC_HASH_REG_1, 0x0000,
7950                         0x00000000, 0xffffffff },
7951                 { MAC_HASH_REG_2, 0x0000,
7952                         0x00000000, 0xffffffff },
7953                 { MAC_HASH_REG_3, 0x0000,
7954                         0x00000000, 0xffffffff },
7955
7956                 /* Receive Data and Receive BD Initiator Control Registers. */
7957                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7958                         0x00000000, 0xffffffff },
7959                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7960                         0x00000000, 0xffffffff },
7961                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7962                         0x00000000, 0x00000003 },
7963                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7964                         0x00000000, 0xffffffff },
7965                 { RCVDBDI_STD_BD+0, 0x0000,
7966                         0x00000000, 0xffffffff },
7967                 { RCVDBDI_STD_BD+4, 0x0000,
7968                         0x00000000, 0xffffffff },
7969                 { RCVDBDI_STD_BD+8, 0x0000,
7970                         0x00000000, 0xffff0002 },
7971                 { RCVDBDI_STD_BD+0xc, 0x0000,
7972                         0x00000000, 0xffffffff },
7973         
7974                 /* Receive BD Initiator Control Registers. */
7975                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7976                         0x00000000, 0xffffffff },
7977                 { RCVBDI_STD_THRESH, TG3_FL_5705,
7978                         0x00000000, 0x000003ff },
7979                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7980                         0x00000000, 0xffffffff },
7981         
7982                 /* Host Coalescing Control Registers. */
7983                 { HOSTCC_MODE, TG3_FL_NOT_5705,
7984                         0x00000000, 0x00000004 },
7985                 { HOSTCC_MODE, TG3_FL_5705,
7986                         0x00000000, 0x000000f6 },
7987                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7988                         0x00000000, 0xffffffff },
7989                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7990                         0x00000000, 0x000003ff },
7991                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7992                         0x00000000, 0xffffffff },
7993                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7994                         0x00000000, 0x000003ff },
7995                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7996                         0x00000000, 0xffffffff },
7997                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7998                         0x00000000, 0x000000ff },
7999                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8000                         0x00000000, 0xffffffff },
8001                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8002                         0x00000000, 0x000000ff },
8003                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8004                         0x00000000, 0xffffffff },
8005                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8006                         0x00000000, 0xffffffff },
8007                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8008                         0x00000000, 0xffffffff },
8009                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8010                         0x00000000, 0x000000ff },
8011                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8012                         0x00000000, 0xffffffff },
8013                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8014                         0x00000000, 0x000000ff },
8015                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8016                         0x00000000, 0xffffffff },
8017                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8018                         0x00000000, 0xffffffff },
8019                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8020                         0x00000000, 0xffffffff },
8021                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8022                         0x00000000, 0xffffffff },
8023                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8024                         0x00000000, 0xffffffff },
8025                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8026                         0xffffffff, 0x00000000 },
8027                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8028                         0xffffffff, 0x00000000 },
8029
8030                 /* Buffer Manager Control Registers. */
8031                 { BUFMGR_MB_POOL_ADDR, 0x0000,
8032                         0x00000000, 0x007fff80 },
8033                 { BUFMGR_MB_POOL_SIZE, 0x0000,
8034                         0x00000000, 0x007fffff },
8035                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8036                         0x00000000, 0x0000003f },
8037                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8038                         0x00000000, 0x000001ff },
8039                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8040                         0x00000000, 0x000001ff },
8041                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8042                         0xffffffff, 0x00000000 },
8043                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8044                         0xffffffff, 0x00000000 },
8045         
8046                 /* Mailbox Registers */
8047                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8048                         0x00000000, 0x000001ff },
8049                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8050                         0x00000000, 0x000001ff },
8051                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8052                         0x00000000, 0x000007ff },
8053                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8054                         0x00000000, 0x000001ff },
8055
8056                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8057         };
8058
8059         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8060                 is_5705 = 1;
8061         else
8062                 is_5705 = 0;
8063
8064         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8065                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8066                         continue;
8067
8068                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8069                         continue;
8070
8071                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8072                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8073                         continue;
8074
8075                 offset = (u32) reg_tbl[i].offset;
8076                 read_mask = reg_tbl[i].read_mask;
8077                 write_mask = reg_tbl[i].write_mask;
8078
8079                 /* Save the original register content */
8080                 save_val = tr32(offset);
8081
8082                 /* Determine the read-only value. */
8083                 read_val = save_val & read_mask;
8084
8085                 /* Write zero to the register, then make sure the read-only bits
8086                  * are not changed and the read/write bits are all zeros.
8087                  */
8088                 tw32(offset, 0);
8089
8090                 val = tr32(offset);
8091
8092                 /* Test the read-only and read/write bits. */
8093                 if (((val & read_mask) != read_val) || (val & write_mask))
8094                         goto out;
8095
8096                 /* Write ones to all the bits defined by RdMask and WrMask, then
8097                  * make sure the read-only bits are not changed and the
8098                  * read/write bits are all ones.
8099                  */
8100                 tw32(offset, read_mask | write_mask);
8101
8102                 val = tr32(offset);
8103
8104                 /* Test the read-only bits. */
8105                 if ((val & read_mask) != read_val)
8106                         goto out;
8107
8108                 /* Test the read/write bits. */
8109                 if ((val & write_mask) != write_mask)
8110                         goto out;
8111
8112                 tw32(offset, save_val);
8113         }
8114
8115         return 0;
8116
8117 out:
8118         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8119         tw32(offset, save_val);
8120         return -EIO;
8121 }
8122
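/* Write each test pattern throughout the given window of NIC-internal
 * memory and verify that every word reads back unchanged.
 */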
8123 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8124 {
8125         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8126         int i;
8127         u32 j;
8128
8129         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8130                 for (j = 0; j < len; j += 4) {
8131                         u32 val;
8132
8133                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8134                         tg3_read_mem(tp, offset + j, &val);
8135                         if (val != test_pattern[i])
8136                                 return -EIO;
8137                 }
8138         }
8139         return 0;
8140 }
8141
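/* Run the pattern test over each internal memory region listed for
 * this chip family; 5705 and newer parts use a different region
 * table than the original 570x parts.
 */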
8142 static int tg3_test_memory(struct tg3 *tp)
8143 {
8144         static struct mem_entry {
8145                 u32 offset;
8146                 u32 len;
8147         } mem_tbl_570x[] = {
8148                 { 0x00000000, 0x00b50},
8149                 { 0x00002000, 0x1c000},
8150                 { 0xffffffff, 0x00000}
8151         }, mem_tbl_5705[] = {
8152                 { 0x00000100, 0x0000c},
8153                 { 0x00000200, 0x00008},
8154                 { 0x00004000, 0x00800},
8155                 { 0x00006000, 0x01000},
8156                 { 0x00008000, 0x02000},
8157                 { 0x00010000, 0x0e000},
8158                 { 0xffffffff, 0x00000}
8159         };
8160         struct mem_entry *mem_tbl;
8161         int err = 0;
8162         int i;
8163
8164         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8165                 mem_tbl = mem_tbl_5705;
8166         else
8167                 mem_tbl = mem_tbl_570x;
8168
8169         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8170                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8171                     mem_tbl[i].len)) != 0)
8172                         break;
8173         }
8174         
8175         return err;
8176 }
8177
8178 #define TG3_MAC_LOOPBACK        0
8179 #define TG3_PHY_LOOPBACK        1
8180
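/* Place the MAC (internal loopback) or the PHY (BMCR loopback) into
 * loopback mode, transmit a single 1514-byte test frame and verify
 * that it comes back on the standard RX ring with an intact payload.
 */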
8181 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8182 {
8183         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8184         u32 desc_idx;
8185         struct sk_buff *skb, *rx_skb;
8186         u8 *tx_data;
8187         dma_addr_t map;
8188         int num_pkts, tx_len, rx_len, i, err;
8189         struct tg3_rx_buffer_desc *desc;
8190
8191         if (loopback_mode == TG3_MAC_LOOPBACK) {
8192                 /* HW errata - mac loopback fails in some cases on 5780.
8193                  * Normal traffic and PHY loopback are not affected by
8194                  * errata.
8195                  */
8196                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8197                         return 0;
8198
8199                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8200                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8201                            MAC_MODE_PORT_MODE_GMII;
8202                 tw32(MAC_MODE, mac_mode);
8203         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8204                 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8205                                            BMCR_SPEED1000);
8206                 udelay(40);
8207                 /* reset to prevent losing 1st rx packet intermittently */
8208                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8209                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8210                         udelay(10);
8211                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8212                 }
8213                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8214                            MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
8215                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
8216                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8217                 tw32(MAC_MODE, mac_mode);
8218         }
8219         else
8220                 return -EINVAL;
8221
8222         err = -EIO;
8223
8224         tx_len = 1514;
8225         skb = dev_alloc_skb(tx_len);
        if (!skb)
                return -ENOMEM;
8226         tx_data = skb_put(skb, tx_len);
8227         memcpy(tx_data, tp->dev->dev_addr, 6);
8228         memset(tx_data + 6, 0x0, 8);
8229
8230         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8231
8232         for (i = 14; i < tx_len; i++)
8233                 tx_data[i] = (u8) (i & 0xff);
8234
8235         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8236
8237         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8238              HOSTCC_MODE_NOW);
8239
8240         udelay(10);
8241
8242         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8243
8244         num_pkts = 0;
8245
8246         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8247
8248         tp->tx_prod++;
8249         num_pkts++;
8250
8251         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8252                      tp->tx_prod);
8253         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8254
8255         udelay(10);
8256
8257         for (i = 0; i < 10; i++) {
8258                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8259                        HOSTCC_MODE_NOW);
8260
8261                 udelay(10);
8262
8263                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8264                 rx_idx = tp->hw_status->idx[0].rx_producer;
8265                 if ((tx_idx == tp->tx_prod) &&
8266                     (rx_idx == (rx_start_idx + num_pkts)))
8267                         break;
8268         }
8269
8270         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8271         dev_kfree_skb(skb);
8272
8273         if (tx_idx != tp->tx_prod)
8274                 goto out;
8275
8276         if (rx_idx != rx_start_idx + num_pkts)
8277                 goto out;
8278
8279         desc = &tp->rx_rcb[rx_start_idx];
8280         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8281         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8282         if (opaque_key != RXD_OPAQUE_RING_STD)
8283                 goto out;
8284
8285         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8286             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8287                 goto out;
8288
8289         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8290         if (rx_len != tx_len)
8291                 goto out;
8292
8293         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8294
8295         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8296         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8297
8298         for (i = 14; i < tx_len; i++) {
8299                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8300                         goto out;
8301         }
8302         err = 0;
8303         
8304         /* tg3_free_rings will unmap and free the rx_skb */
8305 out:
8306         return err;
8307 }
8308
8309 #define TG3_MAC_LOOPBACK_FAILED         1
8310 #define TG3_PHY_LOOPBACK_FAILED         2
8311 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8312                                          TG3_PHY_LOOPBACK_FAILED)
8313
8314 static int tg3_test_loopback(struct tg3 *tp)
8315 {
8316         int err = 0;
8317
8318         if (!netif_running(tp->dev))
8319                 return TG3_LOOPBACK_FAILED;
8320
8321         tg3_reset_hw(tp);
8322
8323         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8324                 err |= TG3_MAC_LOOPBACK_FAILED;
8325         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8326                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8327                         err |= TG3_PHY_LOOPBACK_FAILED;
8328         }
8329
8330         return err;
8331 }
8332
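/* Ethtool self-test entry point.  Result slots: data[0] = NVRAM,
 * data[1] = link, data[2] = registers, data[3] = memory,
 * data[4] = loopback, data[5] = interrupt.  The offline tests halt
 * the chip and re-initialize it afterwards if the device was running.
 */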
8333 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8334                           u64 *data)
8335 {
8336         struct tg3 *tp = netdev_priv(dev);
8337
8338         if (tp->link_config.phy_is_low_power)
8339                 tg3_set_power_state(tp, PCI_D0);
8340
8341         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8342
8343         if (tg3_test_nvram(tp) != 0) {
8344                 etest->flags |= ETH_TEST_FL_FAILED;
8345                 data[0] = 1;
8346         }
8347         if (tg3_test_link(tp) != 0) {
8348                 etest->flags |= ETH_TEST_FL_FAILED;
8349                 data[1] = 1;
8350         }
8351         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8352                 int err, irq_sync = 0;
8353
8354                 if (netif_running(dev)) {
8355                         tg3_netif_stop(tp);
8356                         irq_sync = 1;
8357                 }
8358
8359                 tg3_full_lock(tp, irq_sync);
8360
8361                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8362                 err = tg3_nvram_lock(tp);
8363                 tg3_halt_cpu(tp, RX_CPU_BASE);
8364                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8365                         tg3_halt_cpu(tp, TX_CPU_BASE);
8366                 if (!err)
8367                         tg3_nvram_unlock(tp);
8368
8369                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8370                         tg3_phy_reset(tp);
8371
8372                 if (tg3_test_registers(tp) != 0) {
8373                         etest->flags |= ETH_TEST_FL_FAILED;
8374                         data[2] = 1;
8375                 }
8376                 if (tg3_test_memory(tp) != 0) {
8377                         etest->flags |= ETH_TEST_FL_FAILED;
8378                         data[3] = 1;
8379                 }
8380                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8381                         etest->flags |= ETH_TEST_FL_FAILED;
8382
8383                 tg3_full_unlock(tp);
8384
8385                 if (tg3_test_interrupt(tp) != 0) {
8386                         etest->flags |= ETH_TEST_FL_FAILED;
8387                         data[5] = 1;
8388                 }
8389
8390                 tg3_full_lock(tp, 0);
8391
8392                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8393                 if (netif_running(dev)) {
8394                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8395                         tg3_init_hw(tp);
8396                         tg3_netif_start(tp);
8397                 }
8398
8399                 tg3_full_unlock(tp);
8400         }
8401         if (tp->link_config.phy_is_low_power)
8402                 tg3_set_power_state(tp, PCI_D3hot);
8403
8404 }
8405
8406 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8407 {
8408         struct mii_ioctl_data *data = if_mii(ifr);
8409         struct tg3 *tp = netdev_priv(dev);
8410         int err;
8411
8412         switch(cmd) {
8413         case SIOCGMIIPHY:
8414                 data->phy_id = PHY_ADDR;
8415
8416                 /* fallthru */
8417         case SIOCGMIIREG: {
8418                 u32 mii_regval;
8419
8420                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8421                         break;                  /* We have no PHY */
8422
8423                 if (tp->link_config.phy_is_low_power)
8424                         return -EAGAIN;
8425
8426                 spin_lock_bh(&tp->lock);
8427                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8428                 spin_unlock_bh(&tp->lock);
8429
8430                 data->val_out = mii_regval;
8431
8432                 return err;
8433         }
8434
8435         case SIOCSMIIREG:
8436                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8437                         break;                  /* We have no PHY */
8438
8439                 if (!capable(CAP_NET_ADMIN))
8440                         return -EPERM;
8441
8442                 if (tp->link_config.phy_is_low_power)
8443                         return -EAGAIN;
8444
8445                 spin_lock_bh(&tp->lock);
8446                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8447                 spin_unlock_bh(&tp->lock);
8448
8449                 return err;
8450
8451         default:
8452                 /* do nothing */
8453                 break;
8454         }
8455         return -EOPNOTSUPP;
8456 }
8457
8458 #if TG3_VLAN_TAG_USED
8459 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8460 {
8461         struct tg3 *tp = netdev_priv(dev);
8462
8463         tg3_full_lock(tp, 0);
8464
8465         tp->vlgrp = grp;
8466
8467         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8468         __tg3_set_rx_mode(dev);
8469
8470         tg3_full_unlock(tp);
8471 }
8472
8473 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8474 {
8475         struct tg3 *tp = netdev_priv(dev);
8476
8477         tg3_full_lock(tp, 0);
8478         if (tp->vlgrp)
8479                 tp->vlgrp->vlan_devices[vid] = NULL;
8480         tg3_full_unlock(tp);
8481 }
8482 #endif
8483
8484 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8485 {
8486         struct tg3 *tp = netdev_priv(dev);
8487
8488         memcpy(ec, &tp->coal, sizeof(*ec));
8489         return 0;
8490 }
8491
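/* Validate the requested coalescing parameters against the per-chip
 * limits (the IRQ-tick and statistics-tick limits are zero on 5705
 * and newer chips, so nonzero values are rejected there), require at
 * least one nonzero usec/frame threshold for both RX and TX, and
 * then copy the relevant fields into tp->coal.
 */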
8492 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8493 {
8494         struct tg3 *tp = netdev_priv(dev);
8495         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8496         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8497
8498         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8499                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8500                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8501                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8502                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8503         }
8504
8505         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8506             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8507             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8508             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8509             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8510             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8511             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8512             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8513             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8514             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8515                 return -EINVAL;
8516
8517         /* No rx interrupts will be generated if both are zero */
8518         if ((ec->rx_coalesce_usecs == 0) &&
8519             (ec->rx_max_coalesced_frames == 0))
8520                 return -EINVAL;
8521
8522         /* No tx interrupts will be generated if both are zero */
8523         if ((ec->tx_coalesce_usecs == 0) &&
8524             (ec->tx_max_coalesced_frames == 0))
8525                 return -EINVAL;
8526
8527         /* Only copy relevant parameters, ignore all others. */
8528         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8529         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8530         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8531         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8532         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8533         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8534         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8535         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8536         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8537
8538         if (netif_running(dev)) {
8539                 tg3_full_lock(tp, 0);
8540                 __tg3_set_coalesce(tp, &tp->coal);
8541                 tg3_full_unlock(tp);
8542         }
8543         return 0;
8544 }
8545
8546 static struct ethtool_ops tg3_ethtool_ops = {
8547         .get_settings           = tg3_get_settings,
8548         .set_settings           = tg3_set_settings,
8549         .get_drvinfo            = tg3_get_drvinfo,
8550         .get_regs_len           = tg3_get_regs_len,
8551         .get_regs               = tg3_get_regs,
8552         .get_wol                = tg3_get_wol,
8553         .set_wol                = tg3_set_wol,
8554         .get_msglevel           = tg3_get_msglevel,
8555         .set_msglevel           = tg3_set_msglevel,
8556         .nway_reset             = tg3_nway_reset,
8557         .get_link               = ethtool_op_get_link,
8558         .get_eeprom_len         = tg3_get_eeprom_len,
8559         .get_eeprom             = tg3_get_eeprom,
8560         .set_eeprom             = tg3_set_eeprom,
8561         .get_ringparam          = tg3_get_ringparam,
8562         .set_ringparam          = tg3_set_ringparam,
8563         .get_pauseparam         = tg3_get_pauseparam,
8564         .set_pauseparam         = tg3_set_pauseparam,
8565         .get_rx_csum            = tg3_get_rx_csum,
8566         .set_rx_csum            = tg3_set_rx_csum,
8567         .get_tx_csum            = ethtool_op_get_tx_csum,
8568         .set_tx_csum            = tg3_set_tx_csum,
8569         .get_sg                 = ethtool_op_get_sg,
8570         .set_sg                 = ethtool_op_set_sg,
8571 #if TG3_TSO_SUPPORT != 0
8572         .get_tso                = ethtool_op_get_tso,
8573         .set_tso                = tg3_set_tso,
8574 #endif
8575         .self_test_count        = tg3_get_test_count,
8576         .self_test              = tg3_self_test,
8577         .get_strings            = tg3_get_strings,
8578         .phys_id                = tg3_phys_id,
8579         .get_stats_count        = tg3_get_stats_count,
8580         .get_ethtool_stats      = tg3_get_ethtool_stats,
8581         .get_coalesce           = tg3_get_coalesce,
8582         .set_coalesce           = tg3_set_coalesce,
8583         .get_perm_addr          = ethtool_op_get_perm_addr,
8584 };
8585
8586 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8587 {
8588         u32 cursize, val, magic;
8589
8590         tp->nvram_size = EEPROM_CHIP_SIZE;
8591
8592         if (tg3_nvram_read(tp, 0, &val) != 0)
8593                 return;
8594
8595         magic = swab32(val);
8596         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
8597                 return;
8598
8599         /*
8600          * Size the chip by reading offsets at increasing powers of two.
8601          * When we encounter our validation signature, we know the addressing
8602          * has wrapped around, and thus have our chip size.
8603          */
8604         cursize = 0x10;
8605
8606         while (cursize < tp->nvram_size) {
8607                 if (tg3_nvram_read(tp, cursize, &val) != 0)
8608                         return;
8609
8610                 if (swab32(val) == magic)
8611                         break;
8612
8613                 cursize <<= 1;
8614         }
8615
8616         tp->nvram_size = cursize;
8617 }
8618                 
8619 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8620 {
8621         u32 val;
8622
8623         if (tg3_nvram_read(tp, 0, &val) != 0)
8624                 return;
8625
8626         /* Selfboot format */
8627         if (swab32(val) != TG3_EEPROM_MAGIC) {
8628                 tg3_get_eeprom_size(tp);
8629                 return;
8630         }
8631
8632         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8633                 if (val != 0) {
8634                         tp->nvram_size = (val >> 16) * 1024;
8635                         return;
8636                 }
8637         }
8638         tp->nvram_size = 0x20000;
8639 }
8640
8641 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8642 {
8643         u32 nvcfg1;
8644
8645         nvcfg1 = tr32(NVRAM_CFG1);
8646         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8647                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8648         }
8649         else {
8650                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8651                 tw32(NVRAM_CFG1, nvcfg1);
8652         }
8653
8654         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8655             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8656                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8657                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8658                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8659                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8660                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8661                                 break;
8662                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8663                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8664                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8665                                 break;
8666                         case FLASH_VENDOR_ATMEL_EEPROM:
8667                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8668                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8669                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8670                                 break;
8671                         case FLASH_VENDOR_ST:
8672                                 tp->nvram_jedecnum = JEDEC_ST;
8673                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8674                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8675                                 break;
8676                         case FLASH_VENDOR_SAIFUN:
8677                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8678                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8679                                 break;
8680                         case FLASH_VENDOR_SST_SMALL:
8681                         case FLASH_VENDOR_SST_LARGE:
8682                                 tp->nvram_jedecnum = JEDEC_SST;
8683                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8684                                 break;
8685                 }
8686         }
8687         else {
8688                 tp->nvram_jedecnum = JEDEC_ATMEL;
8689                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8690                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8691         }
8692 }
8693
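/* Decode NVRAM_CFG1 on 5752 devices: determine the flash/EEPROM
 * vendor, buffering and page size, and note TPM-protected NVRAM.
 */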
8694 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8695 {
8696         u32 nvcfg1;
8697
8698         nvcfg1 = tr32(NVRAM_CFG1);
8699
8700         /* NVRAM protection for TPM */
8701         if (nvcfg1 & (1 << 27))
8702                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8703
8704         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8705                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8706                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8707                         tp->nvram_jedecnum = JEDEC_ATMEL;
8708                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8709                         break;
8710                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8711                         tp->nvram_jedecnum = JEDEC_ATMEL;
8712                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8713                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8714                         break;
8715                 case FLASH_5752VENDOR_ST_M45PE10:
8716                 case FLASH_5752VENDOR_ST_M45PE20:
8717                 case FLASH_5752VENDOR_ST_M45PE40:
8718                         tp->nvram_jedecnum = JEDEC_ST;
8719                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8720                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8721                         break;
8722         }
8723
8724         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8725                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8726                         case FLASH_5752PAGE_SIZE_256:
8727                                 tp->nvram_pagesize = 256;
8728                                 break;
8729                         case FLASH_5752PAGE_SIZE_512:
8730                                 tp->nvram_pagesize = 512;
8731                                 break;
8732                         case FLASH_5752PAGE_SIZE_1K:
8733                                 tp->nvram_pagesize = 1024;
8734                                 break;
8735                         case FLASH_5752PAGE_SIZE_2K:
8736                                 tp->nvram_pagesize = 2048;
8737                                 break;
8738                         case FLASH_5752PAGE_SIZE_4K:
8739                                 tp->nvram_pagesize = 4096;
8740                                 break;
8741                         case FLASH_5752PAGE_SIZE_264:
8742                                 tp->nvram_pagesize = 264;
8743                                 break;
8744                 }
8745         }
8746         else {
8747                 /* For eeprom, set pagesize to maximum eeprom size */
8748                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8749
8750                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8751                 tw32(NVRAM_CFG1, nvcfg1);
8752         }
8753 }
8754
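/* Decode NVRAM_CFG1 on 5787 devices: EEPROM parts are marked
 * buffered with the compatibility bypass cleared, Atmel flash parts
 * use a 264-byte page and ST M45PE parts a 256-byte page.
 */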
8755 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
8756 {
8757         u32 nvcfg1;
8758
8759         nvcfg1 = tr32(NVRAM_CFG1);
8760
8761         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8762                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
8763                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
8764                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
8765                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
8766                         tp->nvram_jedecnum = JEDEC_ATMEL;
8767                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8768                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8769
8770                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8771                         tw32(NVRAM_CFG1, nvcfg1);
8772                         break;
8773                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8774                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
8775                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
8776                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
8777                         tp->nvram_jedecnum = JEDEC_ATMEL;
8778                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8779                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8780                         tp->nvram_pagesize = 264;
8781                         break;
8782                 case FLASH_5752VENDOR_ST_M45PE10:
8783                 case FLASH_5752VENDOR_ST_M45PE20:
8784                 case FLASH_5752VENDOR_ST_M45PE40:
8785                         tp->nvram_jedecnum = JEDEC_ST;
8786                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8787                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8788                         tp->nvram_pagesize = 256;
8789                         break;
8790         }
8791 }
8792
8793 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
8794 static void __devinit tg3_nvram_init(struct tg3 *tp)
8795 {
8796         int j;
8797
8798         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8799                 return;
8800
8801         tw32_f(GRC_EEPROM_ADDR,
8802              (EEPROM_ADDR_FSM_RESET |
8803               (EEPROM_DEFAULT_CLOCK_PERIOD <<
8804                EEPROM_ADDR_CLKPERD_SHIFT)));
8805
8806         /* XXX schedule_timeout() ... */
8807         for (j = 0; j < 100; j++)
8808                 udelay(10);
8809
8810         /* Enable seeprom accesses. */
8811         tw32_f(GRC_LOCAL_CTRL,
8812              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8813         udelay(100);
8814
8815         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8816             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8817                 tp->tg3_flags |= TG3_FLAG_NVRAM;
8818
8819                 if (tg3_nvram_lock(tp)) {
8820                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
8821                                "tg3_nvram_init failed.\n", tp->dev->name);
8822                         return;
8823                 }
8824                 tg3_enable_nvram_access(tp);
8825
8826                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8827                         tg3_get_5752_nvram_info(tp);
8828                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8829                         tg3_get_5787_nvram_info(tp);
8830                 else
8831                         tg3_get_nvram_info(tp);
8832
8833                 tg3_get_nvram_size(tp);
8834
8835                 tg3_disable_nvram_access(tp);
8836                 tg3_nvram_unlock(tp);
8837
8838         } else {
8839                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8840
8841                 tg3_get_eeprom_size(tp);
8842         }
8843 }
8844
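/* Read one 32-bit word through the legacy serial-EEPROM interface:
 * program GRC_EEPROM_ADDR with the offset and the READ/START bits,
 * poll for EEPROM_ADDR_COMPLETE, then fetch the word from
 * GRC_EEPROM_DATA.
 */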
8845 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8846                                         u32 offset, u32 *val)
8847 {
8848         u32 tmp;
8849         int i;
8850
8851         if (offset > EEPROM_ADDR_ADDR_MASK ||
8852             (offset % 4) != 0)
8853                 return -EINVAL;
8854
8855         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8856                                         EEPROM_ADDR_DEVID_MASK |
8857                                         EEPROM_ADDR_READ);
8858         tw32(GRC_EEPROM_ADDR,
8859              tmp |
8860              (0 << EEPROM_ADDR_DEVID_SHIFT) |
8861              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8862               EEPROM_ADDR_ADDR_MASK) |
8863              EEPROM_ADDR_READ | EEPROM_ADDR_START);
8864
8865         for (i = 0; i < 10000; i++) {
8866                 tmp = tr32(GRC_EEPROM_ADDR);
8867
8868                 if (tmp & EEPROM_ADDR_COMPLETE)
8869                         break;
8870                 udelay(100);
8871         }
8872         if (!(tmp & EEPROM_ADDR_COMPLETE))
8873                 return -EBUSY;
8874
8875         *val = tr32(GRC_EEPROM_DATA);
8876         return 0;
8877 }
8878
8879 #define NVRAM_CMD_TIMEOUT 10000
8880
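/* Issue an NVRAM command and poll for NVRAM_CMD_DONE, giving up
 * after NVRAM_CMD_TIMEOUT polls of 10 usec each.
 */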
8881 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8882 {
8883         int i;
8884
8885         tw32(NVRAM_CMD, nvram_cmd);
8886         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8887                 udelay(10);
8888                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8889                         udelay(10);
8890                         break;
8891                 }
8892         }
8893         if (i == NVRAM_CMD_TIMEOUT) {
8894                 return -EBUSY;
8895         }
8896         return 0;
8897 }
8898
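/* Read a 32-bit word from NVRAM.  Devices without the NVRAM
 * interface fall back to the legacy EEPROM path; Atmel buffered
 * flash first has its linear offset converted to a page-number /
 * page-offset address.
 */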
8899 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8900 {
8901         int ret;
8902
8903         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8904                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8905                 return -EINVAL;
8906         }
8907
8908         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8909                 return tg3_nvram_read_using_eeprom(tp, offset, val);
8910
8911         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8912                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8913                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8914
8915                 offset = ((offset / tp->nvram_pagesize) <<
8916                           ATMEL_AT45DB0X1B_PAGE_POS) +
8917                         (offset % tp->nvram_pagesize);
8918         }
8919
8920         if (offset > NVRAM_ADDR_MSK)
8921                 return -EINVAL;
8922
8923         ret = tg3_nvram_lock(tp);
8924         if (ret)
8925                 return ret;
8926
8927         tg3_enable_nvram_access(tp);
8928
8929         tw32(NVRAM_ADDR, offset);
8930         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8931                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8932
8933         if (ret == 0)
8934                 *val = swab32(tr32(NVRAM_RDDATA));
8935
8936         tg3_disable_nvram_access(tp);
8937
8938         tg3_nvram_unlock(tp);
8939
8940         return ret;
8941 }
8942
8943 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8944                                     u32 offset, u32 len, u8 *buf)
8945 {
8946         int i, j, rc = 0;
8947         u32 val;
8948
8949         for (i = 0; i < len; i += 4) {
8950                 u32 addr, data;
8951
8952                 addr = offset + i;
8953
8954                 memcpy(&data, buf + i, 4);
8955
8956                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8957
8958                 val = tr32(GRC_EEPROM_ADDR);
8959                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8960
8961                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8962                         EEPROM_ADDR_READ);
8963                 tw32(GRC_EEPROM_ADDR, val |
8964                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
8965                         (addr & EEPROM_ADDR_ADDR_MASK) |
8966                         EEPROM_ADDR_START |
8967                         EEPROM_ADDR_WRITE);
8968                 
8969                 for (j = 0; j < 10000; j++) {
8970                         val = tr32(GRC_EEPROM_ADDR);
8971
8972                         if (val & EEPROM_ADDR_COMPLETE)
8973                                 break;
8974                         udelay(100);
8975                 }
8976                 if (!(val & EEPROM_ADDR_COMPLETE)) {
8977                         rc = -EBUSY;
8978                         break;
8979                 }
8980         }
8981
8982         return rc;
8983 }
8984
8985 /* offset and length are dword aligned */
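/* Each page touched by the write is read into a bounce buffer and
 * merged with the caller's data, then the page is erased and
 * rewritten one word at a time.
 */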
8986 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8987                 u8 *buf)
8988 {
8989         int ret = 0;
8990         u32 pagesize = tp->nvram_pagesize;
8991         u32 pagemask = pagesize - 1;
8992         u32 nvram_cmd;
8993         u8 *tmp;
8994
8995         tmp = kmalloc(pagesize, GFP_KERNEL);
8996         if (tmp == NULL)
8997                 return -ENOMEM;
8998
8999         while (len) {
9000                 int j;
9001                 u32 phy_addr, page_off, size;
9002
9003                 phy_addr = offset & ~pagemask;
9004         
9005                 for (j = 0; j < pagesize; j += 4) {
9006                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9007                                                 (u32 *) (tmp + j))))
9008                                 break;
9009                 }
9010                 if (ret)
9011                         break;
9012
9013                 page_off = offset & pagemask;
9014                 size = pagesize;
9015                 if (len < size)
9016                         size = len;
9017
9018                 len -= size;
9019
9020                 memcpy(tmp + page_off, buf, size);
9021
9022                 offset = offset + (pagesize - page_off);
9023
9024                 tg3_enable_nvram_access(tp);
9025
9026                 /*
9027                  * Before we can erase the flash page, we need
9028                  * to issue a special "write enable" command.
9029                  */
9030                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9031
9032                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9033                         break;
9034
9035                 /* Erase the target page */
9036                 tw32(NVRAM_ADDR, phy_addr);
9037
9038                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9039                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9040
9041                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9042                         break;
9043
9044                 /* Issue another write enable to start the write. */
9045                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9046
9047                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9048                         break;
9049
9050                 for (j = 0; j < pagesize; j += 4) {
9051                         u32 data;
9052
9053                         data = *((u32 *) (tmp + j));
9054                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9055
9056                         tw32(NVRAM_ADDR, phy_addr + j);
9057
9058                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9059                                 NVRAM_CMD_WR;
9060
9061                         if (j == 0)
9062                                 nvram_cmd |= NVRAM_CMD_FIRST;
9063                         else if (j == (pagesize - 4))
9064                                 nvram_cmd |= NVRAM_CMD_LAST;
9065
9066                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9067                                 break;
9068                 }
9069                 if (ret)
9070                         break;
9071         }
9072
9073         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9074         tg3_nvram_exec_cmd(tp, nvram_cmd);
9075
9076         kfree(tmp);
9077
9078         return ret;
9079 }
9080
9081 /* offset and length are dword aligned */
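/* Buffered flash and EEPROM parts are written one dword at a time;
 * NVRAM_CMD_FIRST/LAST mark the page (or whole-transfer) boundaries,
 * and ST flash on chips other than the 5752/5787 gets an explicit
 * write-enable before the first word of a page.
 */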
9082 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9083                 u8 *buf)
9084 {
9085         int i, ret = 0;
9086
9087         for (i = 0; i < len; i += 4, offset += 4) {
9088                 u32 data, page_off, phy_addr, nvram_cmd;
9089
9090                 memcpy(&data, buf + i, 4);
9091                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9092
9093                 page_off = offset % tp->nvram_pagesize;
9094
9095                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9096                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
9097
9098                         phy_addr = ((offset / tp->nvram_pagesize) <<
9099                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
9100                 }
9101                 else {
9102                         phy_addr = offset;
9103                 }
9104
9105                 tw32(NVRAM_ADDR, phy_addr);
9106
9107                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9108
9109                 if ((page_off == 0) || (i == 0))
9110                         nvram_cmd |= NVRAM_CMD_FIRST;
9111                 else if (page_off == (tp->nvram_pagesize - 4))
9112                         nvram_cmd |= NVRAM_CMD_LAST;
9113
9114                 if (i == (len - 4))
9115                         nvram_cmd |= NVRAM_CMD_LAST;
9116
9117                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9118                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
9119                     (tp->nvram_jedecnum == JEDEC_ST) &&
9120                     (nvram_cmd & NVRAM_CMD_FIRST)) {
9121
9122                         if ((ret = tg3_nvram_exec_cmd(tp,
9123                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9124                                 NVRAM_CMD_DONE)))
9125
9126                                 break;
9127                 }
9128                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9129                         /* We always do complete word writes to eeprom. */
9130                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9131                 }
9132
9133                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9134                         break;
9135         }
9136         return ret;
9137 }
9138
9139 /* offset and length are dword aligned */
9140 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9141 {
9142         int ret;
9143
9144         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9145                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
9146                 return -EINVAL;
9147         }
9148
9149         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9150                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9151                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9152                 udelay(40);
9153         }
9154
9155         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9156                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9157         }
9158         else {
9159                 u32 grc_mode;
9160
9161                 ret = tg3_nvram_lock(tp);
9162                 if (ret)
9163                         return ret;
9164
9165                 tg3_enable_nvram_access(tp);
9166                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9167                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9168                         tw32(NVRAM_WRITE1, 0x406);
9169
9170                 grc_mode = tr32(GRC_MODE);
9171                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9172
9173                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9174                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9175
9176                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9177                                 buf);
9178                 }
9179                 else {
9180                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9181                                 buf);
9182                 }
9183
9184                 grc_mode = tr32(GRC_MODE);
9185                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9186
9187                 tg3_disable_nvram_access(tp);
9188                 tg3_nvram_unlock(tp);
9189         }
9190
9191         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9192                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9193                 udelay(40);
9194         }
9195
9196         return ret;
9197 }
9198
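#if 0
/* Illustrative sketch only, not part of the driver: a minimal example of
 * how a caller might use tg3_nvram_write_block().  Offset and length must
 * be dword aligned; the routine handles locking, write-enable and the
 * buffered vs. unbuffered flash paths internally.  The offset and payload
 * below are made-up example values.
 */
static int tg3_nvram_write_example(struct tg3 *tp)
{
        u8 buf[4] = { 0xde, 0xad, 0xbe, 0xef }; /* one dword of payload */

        return tg3_nvram_write_block(tp, 0x80, 4, buf);
}
#endif
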
9199 struct subsys_tbl_ent {
9200         u16 subsys_vendor, subsys_devid;
9201         u32 phy_id;
9202 };
9203
9204 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9205         /* Broadcom boards. */
9206         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9207         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9208         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9209         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
9210         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9211         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9212         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
9213         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9214         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9215         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9216         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9217
9218         /* 3com boards. */
9219         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9220         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9221         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
9222         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9223         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9224
9225         /* DELL boards. */
9226         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9227         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9228         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9229         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9230
9231         /* Compaq boards. */
9232         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9233         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9234         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
9235         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9236         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9237
9238         /* IBM boards. */
9239         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9240 };
9241
9242 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9243 {
9244         int i;
9245
9246         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9247                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9248                      tp->pdev->subsystem_vendor) &&
9249                     (subsys_id_to_phy_id[i].subsys_devid ==
9250                      tp->pdev->subsystem_device))
9251                         return &subsys_id_to_phy_id[i];
9252         }
9253         return NULL;
9254 }
9255
9256 /* Since this function may be called in D3-hot power state during
9257  * tg3_init_one(), only config cycles are allowed.
9258  */
9259 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9260 {
9261         u32 val;
9262
9263         /* Make sure register accesses (indirect or otherwise)
9264          * will function correctly.
9265          */
9266         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9267                                tp->misc_host_ctrl);
9268
9269         tp->phy_id = PHY_ID_INVALID;
9270         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9271
9272         /* Do not even try poking around in here on Sun parts.  */
9273         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
9274                 return;
9275
9276         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9277         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9278                 u32 nic_cfg, led_cfg;
9279                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9280                 int eeprom_phy_serdes = 0;
9281
9282                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9283                 tp->nic_sram_data_cfg = nic_cfg;
9284
9285                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9286                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9287                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9288                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9289                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9290                     (ver > 0) && (ver < 0x100))
9291                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9292
9293                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9294                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9295                         eeprom_phy_serdes = 1;
9296
9297                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9298                 if (nic_phy_id != 0) {
9299                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9300                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9301
9302                         eeprom_phy_id  = (id1 >> 16) << 10;
9303                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9304                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9305                 } else
9306                         eeprom_phy_id = 0;
9307
9308                 tp->phy_id = eeprom_phy_id;
9309                 if (eeprom_phy_serdes) {
9310                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9311                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9312                         else
9313                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9314                 }
9315
9316                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9317                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9318                                     SHASTA_EXT_LED_MODE_MASK);
9319                 else
9320                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9321
9322                 switch (led_cfg) {
9323                 default:
9324                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9325                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9326                         break;
9327
9328                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9329                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9330                         break;
9331
9332                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9333                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9334
9335                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9336                          * read on some older 5700/5701 bootcode.
9337                          */
9338                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9339                             ASIC_REV_5700 ||
9340                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9341                             ASIC_REV_5701)
9342                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9343
9344                         break;
9345
9346                 case SHASTA_EXT_LED_SHARED:
9347                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9348                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9349                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9350                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9351                                                  LED_CTRL_MODE_PHY_2);
9352                         break;
9353
9354                 case SHASTA_EXT_LED_MAC:
9355                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9356                         break;
9357
9358                 case SHASTA_EXT_LED_COMBO:
9359                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9360                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9361                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9362                                                  LED_CTRL_MODE_PHY_2);
9363                         break;
9364
9365                 }
9366
9367                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9368                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9369                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9370                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9371
9372                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9373                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9374                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9375                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9376
9377                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9378                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9379                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9380                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9381                 }
9382                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9383                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9384
9385                 if (cfg2 & (1 << 17))
9386                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9387
9388                 /* SerDes signal pre-emphasis in register 0x590 is set
9389                  * by the bootcode if bit 18 is set. */
9390                 if (cfg2 & (1 << 18))
9391                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9392         }
9393 }
9394
9395 static int __devinit tg3_phy_probe(struct tg3 *tp)
9396 {
9397         u32 hw_phy_id_1, hw_phy_id_2;
9398         u32 hw_phy_id, hw_phy_id_masked;
9399         int err;
9400
9401         /* Reading the PHY ID register can conflict with ASF
9402          * firmware access to the PHY hardware.
9403          */
9404         err = 0;
9405         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9406                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9407         } else {
9408                 /* Now read the physical PHY_ID from the chip and verify
9409                  * that it is sane.  If it doesn't look good, we fall back
9410                  * to the PHY_ID found in the eeprom area or, failing
9411                  * that, the hard-coded subsystem-ID table.
9412                  */
9413                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9414                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9415
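                /* Pack MII_PHYSID1/2 into the driver's internal PHY_ID
                 * layout so it can be compared against the PHY_ID_*
                 * constants.
                 */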
9416                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9417                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9418                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9419
9420                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9421         }
9422
9423         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9424                 tp->phy_id = hw_phy_id;
9425                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9426                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9427                 else
9428                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9429         } else {
9430                 if (tp->phy_id != PHY_ID_INVALID) {
9431                         /* Do nothing, phy ID already set up in
9432                          * tg3_get_eeprom_hw_cfg().
9433                          */
9434                 } else {
9435                         struct subsys_tbl_ent *p;
9436
9437                         /* No eeprom signature?  Try the hardcoded
9438                          * subsys device table.
9439                          */
9440                         p = lookup_by_subsys(tp);
9441                         if (!p)
9442                                 return -ENODEV;
9443
9444                         tp->phy_id = p->phy_id;
9445                         if (!tp->phy_id ||
9446                             tp->phy_id == PHY_ID_BCM8002)
9447                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9448                 }
9449         }
9450
9451         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9452             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9453                 u32 bmsr, adv_reg, tg3_ctrl;
9454
9455                 tg3_readphy(tp, MII_BMSR, &bmsr);
9456                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9457                     (bmsr & BMSR_LSTATUS))
9458                         goto skip_phy_reset;
9459
9460                 err = tg3_phy_reset(tp);
9461                 if (err)
9462                         return err;
9463
9464                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9465                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9466                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9467                 tg3_ctrl = 0;
9468                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9469                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9470                                     MII_TG3_CTRL_ADV_1000_FULL);
9471                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9472                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9473                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9474                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9475                 }
9476
9477                 if (!tg3_copper_is_advertising_all(tp)) {
9478                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9479
9480                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9481                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9482
9483                         tg3_writephy(tp, MII_BMCR,
9484                                      BMCR_ANENABLE | BMCR_ANRESTART);
9485                 }
9486                 tg3_phy_set_wirespeed(tp);
9487
9488                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9489                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9490                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9491         }
9492
9493 skip_phy_reset:
9494         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9495                 err = tg3_init_5401phy_dsp(tp);
9496                 if (err)
9497                         return err;
9498         }
9499
9500         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9501                 err = tg3_init_5401phy_dsp(tp);
9502         }
9503
9504         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9505                 tp->link_config.advertising =
9506                         (ADVERTISED_1000baseT_Half |
9507                          ADVERTISED_1000baseT_Full |
9508                          ADVERTISED_Autoneg |
9509                          ADVERTISED_FIBRE);
9510         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9511                 tp->link_config.advertising &=
9512                         ~(ADVERTISED_1000baseT_Half |
9513                           ADVERTISED_1000baseT_Full);
9514
9515         return err;
9516 }
9517
9518 static void __devinit tg3_read_partno(struct tg3 *tp)
9519 {
9520         unsigned char vpd_data[256];
9521         int i;
9522         u32 magic;
9523
9524         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9525                 /* Sun decided not to put the necessary bits in the
9526                  * NVRAM of their onboard tg3 parts :(
9527                  */
9528                 strcpy(tp->board_part_number, "Sun 570X");
9529                 return;
9530         }
9531
9532         if (tg3_nvram_read(tp, 0x0, &magic))
9533                 return;
9534
9535         if (swab32(magic) == TG3_EEPROM_MAGIC) {
9536                 for (i = 0; i < 256; i += 4) {
9537                         u32 tmp;
9538
9539                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9540                                 goto out_not_found;
9541
9542                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9543                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9544                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9545                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9546                 }
9547         } else {
9548                 int vpd_cap;
9549
9550                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
9551                 for (i = 0; i < 256; i += 4) {
9552                         u32 tmp, j = 0;
9553                         u16 tmp16;
9554
9555                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
9556                                               i);
9557                         while (j++ < 100) {
9558                                 pci_read_config_word(tp->pdev, vpd_cap +
9559                                                      PCI_VPD_ADDR, &tmp16);
9560                                 if (tmp16 & 0x8000)
9561                                         break;
9562                                 msleep(1);
9563                         }
9564                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9565                                               &tmp);
9566                         tmp = cpu_to_le32(tmp);
9567                         memcpy(&vpd_data[i], &tmp, 4);
9568                 }
9569         }
9570
9571         /* Now parse and find the part number. */
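        /* VPD resource tags: 0x82 is the Identifier String and 0x91 the
         * read/write area, both of which we skip; 0x90 is the read-only
         * area that carries the 'PN' (part number) keyword.
         */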
9572         for (i = 0; i < 256; ) {
9573                 unsigned char val = vpd_data[i];
9574                 int block_end;
9575
9576                 if (val == 0x82 || val == 0x91) {
9577                         i = (i + 3 +
9578                              (vpd_data[i + 1] +
9579                               (vpd_data[i + 2] << 8)));
9580                         continue;
9581                 }
9582
9583                 if (val != 0x90)
9584                         goto out_not_found;
9585
9586                 block_end = (i + 3 +
9587                              (vpd_data[i + 1] +
9588                               (vpd_data[i + 2] << 8)));
9589                 i += 3;
9590                 while (i < block_end) {
9591                         if (vpd_data[i + 0] == 'P' &&
9592                             vpd_data[i + 1] == 'N') {
9593                                 int partno_len = vpd_data[i + 2];
9594
9595                                 if (partno_len > 24)
9596                                         goto out_not_found;
9597
9598                                 memcpy(tp->board_part_number,
9599                                        &vpd_data[i + 3],
9600                                        partno_len);
9601
9602                                 /* Success. */
9603                                 return;
9604                         }
                        /* Advance to the next keyword: 2-byte name,
                         * 1-byte length, then the data itself.
                         */
                        i += 3 + vpd_data[i + 2];
9605                 }
9606
9607                 /* Part number not found. */
9608                 goto out_not_found;
9609         }
9610
9611 out_not_found:
9612         strcpy(tp->board_part_number, "none");
9613 }
9614
9615 #ifdef CONFIG_SPARC64
9616 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9617 {
9618         struct pci_dev *pdev = tp->pdev;
9619         struct pcidev_cookie *pcp = pdev->sysdata;
9620
9621         if (pcp != NULL) {
9622                 int node = pcp->prom_node;
9623                 u32 venid;
9624                 int err;
9625
9626                 err = prom_getproperty(node, "subsystem-vendor-id",
9627                                        (char *) &venid, sizeof(venid));
9628                 if (err == 0 || err == -1)
9629                         return 0;
9630                 if (venid == PCI_VENDOR_ID_SUN)
9631                         return 1;
9632
9633                 /* TG3 chips onboard the SunBlade-2500 don't have the
9634                  * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
9635                  * are distinguishable from non-Sun variants by being
9636                  * named "network" by the firmware.  Non-Sun cards will
9637                  * show up as being named "ethernet".
9638                  */
9639                 if (!strcmp(pcp->prom_name, "network"))
9640                         return 1;
9641         }
9642         return 0;
9643 }
9644 #endif
9645
9646 static int __devinit tg3_get_invariants(struct tg3 *tp)
9647 {
9648         static struct pci_device_id write_reorder_chipsets[] = {
9649                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9650                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9651                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9652                              PCI_DEVICE_ID_VIA_8385_0) },
9653                 { },
9654         };
9655         u32 misc_ctrl_reg;
9656         u32 cacheline_sz_reg;
9657         u32 pci_state_reg, grc_misc_cfg;
9658         u32 val;
9659         u16 pci_cmd;
9660         int err;
9661
9662 #ifdef CONFIG_SPARC64
9663         if (tg3_is_sun_570X(tp))
9664                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9665 #endif
9666
9667         /* Force memory write invalidate off.  If we leave it on,
9668          * then on 5700_BX chips we have to enable a workaround.
9669          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9670          * to match the cacheline size.  The Broadcom driver has this
9671          * workaround but turns MWI off all the time, so it never uses
9672          * it.  This suggests that the workaround is insufficient.
9673          */
9674         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9675         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9676         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9677
9678         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9679          * has the register indirect write enable bit set before
9680          * we try to access any of the MMIO registers.  It is also
9681          * critical that the PCI-X hw workaround situation is decided
9682          * before that as well.
9683          */
9684         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9685                               &misc_ctrl_reg);
9686
9687         tp->pci_chip_rev_id = (misc_ctrl_reg >>
9688                                MISC_HOST_CTRL_CHIPREV_SHIFT);
9689
9690         /* Wrong chip ID in 5752 A0. This code can be removed later
9691          * as A0 is not in production.
9692          */
9693         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9694                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9695
9696         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9697          * we need to disable memory and use config. cycles
9698          * only to access all registers. The 5702/03 chips
9699          * can mistakenly decode the special cycles from the
9700          * ICH chipsets as memory write cycles, causing corruption
9701          * of register and memory space. Only certain ICH bridges
9702          * will drive special cycles with non-zero data during the
9703          * address phase which can fall within the 5703's address
9704          * range. This is not an ICH bug as the PCI spec allows
9705          * non-zero address during special cycles. However, only
9706          * these ICH bridges are known to drive non-zero addresses
9707          * during special cycles.
9708          *
9709          * Since special cycles do not cross PCI bridges, we only
9710          * enable this workaround if the 5703 is on the secondary
9711          * bus of these ICH bridges.
9712          */
9713         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
9714             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
9715                 static struct tg3_dev_id {
9716                         u32     vendor;
9717                         u32     device;
9718                         u32     rev;
9719                 } ich_chipsets[] = {
9720                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
9721                           PCI_ANY_ID },
9722                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
9723                           PCI_ANY_ID },
9724                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
9725                           0xa },
9726                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
9727                           PCI_ANY_ID },
9728                         { },
9729                 };
9730                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
9731                 struct pci_dev *bridge = NULL;
9732
9733                 while (pci_id->vendor != 0) {
9734                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
9735                                                 bridge);
9736                         if (!bridge) {
9737                                 pci_id++;
9738                                 continue;
9739                         }
9740                         if (pci_id->rev != PCI_ANY_ID) {
9741                                 u8 rev;
9742
9743                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
9744                                                      &rev);
9745                                 if (rev > pci_id->rev)
9746                                         continue;
9747                         }
9748                         if (bridge->subordinate &&
9749                             (bridge->subordinate->number ==
9750                              tp->pdev->bus->number)) {
9751
9752                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
9753                                 pci_dev_put(bridge);
9754                                 break;
9755                         }
9756                 }
9757         }
9758
9759         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
9760          * DMA addresses > 40-bit. This bridge may have other additional
9761          * 57xx devices behind it in some 4-port NIC designs for example.
9762          * Any tg3 device found behind the bridge will also need the 40-bit
9763          * DMA workaround.
9764          */
9765         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
9766             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9767                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
9768                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
9769                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
9770         }
9771         else {
9772                 struct pci_dev *bridge = NULL;
9773
9774                 do {
9775                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
9776                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
9777                                                 bridge);
9778                         if (bridge && bridge->subordinate &&
9779                             (bridge->subordinate->number <=
9780                              tp->pdev->bus->number) &&
9781                             (bridge->subordinate->subordinate >=
9782                              tp->pdev->bus->number)) {
9783                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
9784                                 pci_dev_put(bridge);
9785                                 break;
9786                         }
9787                 } while (bridge);
9788         }
9789
9790         /* Initialize misc host control in PCI block. */
9791         tp->misc_host_ctrl |= (misc_ctrl_reg &
9792                                MISC_HOST_CTRL_CHIPREV);
9793         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9794                                tp->misc_host_ctrl);
9795
9796         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9797                               &cacheline_sz_reg);
9798
9799         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
9800         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
9801         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
9802         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
9803
9804         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
9805             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
9806             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9807             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9808                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9809
9810         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
9811             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9812                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9813
9814         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9815                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
9816
9817         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9818             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9819             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
9820             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
9821                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9822
9823         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9824                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9825
9826         /* If we have an AMD 762 or VIA K8T800 chipset, write
9827          * reordering to the mailbox registers done by the host
9828          * controller can cause major troubles.  We read back from
9829          * every mailbox register write to force the writes to be
9830          * posted to the chip in order.
9831          */
9832         if (pci_dev_present(write_reorder_chipsets) &&
9833             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9834                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9835
9836         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9837             tp->pci_lat_timer < 64) {
9838                 tp->pci_lat_timer = 64;
9839
9840                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
9841                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
9842                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
9843                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
9844
9845                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9846                                        cacheline_sz_reg);
9847         }
9848
9849         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9850                               &pci_state_reg);
9851
9852         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9853                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9854
9855                 /* If this is a 5700 BX chipset, and we are in PCI-X
9856                  * mode, enable register write workaround.
9857                  *
9858                  * The workaround is to use indirect register accesses
9859                  * for all chip writes not to mailbox registers.
9860                  */
9861                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9862                         u32 pm_reg;
9863                         u16 pci_cmd;
9864
9865                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9866
9867                          /* The chip can have its power management PCI config
9868                          * space registers clobbered due to this bug.
9869                          * So explicitly force the chip into D0 here.
9870                          */
9871                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9872                                               &pm_reg);
9873                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9874                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9875                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9876                                                pm_reg);
9877
9878                         /* Also, force SERR#/PERR# in PCI command. */
9879                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9880                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9881                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9882                 }
9883         }
9884
9885         /* 5700 BX chips need to have their TX producer index mailboxes
9886          * written twice to workaround a bug.
9887          */
9888         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9889                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9890
9891         /* Back-to-back register writes can cause problems on this chip;
9892          * the workaround is to read back all reg writes except those to
9893          * mailbox regs.  See tg3_write_indirect_reg32().
9894          *
9895          * PCI Express 5750_A0 rev chips need this workaround too.
9896          */
9897         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9898             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9899              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9900                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9901
9902         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9903                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9904         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9905                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9906
9907         /* Chip-specific fixup from Broadcom driver */
9908         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9909             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9910                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9911                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9912         }
9913
9914         /* Default fast path register access methods */
9915         tp->read32 = tg3_read32;
9916         tp->write32 = tg3_write32;
9917         tp->read32_mbox = tg3_read32;
9918         tp->write32_mbox = tg3_write32;
9919         tp->write32_tx_mbox = tg3_write32;
9920         tp->write32_rx_mbox = tg3_write32;
9921
9922         /* Various workaround register access methods */
9923         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
9924                 tp->write32 = tg3_write_indirect_reg32;
9925         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
9926                 tp->write32 = tg3_write_flush_reg32;
9927
9928         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
9929             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
9930                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9931                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
9932                         tp->write32_rx_mbox = tg3_write_flush_reg32;
9933         }
9934
9935         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
9936                 tp->read32 = tg3_read_indirect_reg32;
9937                 tp->write32 = tg3_write_indirect_reg32;
9938                 tp->read32_mbox = tg3_read_indirect_mbox;
9939                 tp->write32_mbox = tg3_write_indirect_mbox;
9940                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
9941                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
9942
9943                 iounmap(tp->regs);
9944                 tp->regs = NULL;
9945
9946                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9947                 pci_cmd &= ~PCI_COMMAND_MEMORY;
9948                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9949         }
9950
9951         /* Get eeprom hw config before calling tg3_set_power_state().
9952          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9953          * determined before calling tg3_set_power_state() so that
9954          * we know whether or not to switch out of Vaux power.
9955          * When the flag is set, it means that GPIO1 is used for eeprom
9956          * write protect and also implies that it is a LOM where GPIOs
9957          * are not used to switch power.
9958          */
9959         tg3_get_eeprom_hw_cfg(tp);
9960
9961         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9962          * GPIO1 driven high will bring 5700's external PHY out of reset.
9963          * It is also used as eeprom write protect on LOMs.
9964          */
9965         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9966         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9967             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9968                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9969                                        GRC_LCLCTRL_GPIO_OUTPUT1);
9970         /* Unused GPIO3 must be driven as output on 5752 because there
9971          * are no pull-up resistors on unused GPIO pins.
9972          */
9973         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9974                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
9975
9976         /* Force the chip into D0. */
9977         err = tg3_set_power_state(tp, PCI_D0);
9978         if (err) {
9979                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9980                        pci_name(tp->pdev));
9981                 return err;
9982         }
9983
9984         /* 5700 B0 chips do not support checksumming correctly due
9985          * to hardware bugs.
9986          */
9987         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9988                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9989
9990         /* Pseudo-header checksum is done by hardware logic and not
9991          * the offload processors, so make the chip do the pseudo-
9992          * header checksums on receive.  For transmit it is more
9993          * convenient to do the pseudo-header checksum in software
9994          * as Linux does that on transmit for us in all cases.
9995          */
9996         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9997         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9998
9999         /* Derive initial jumbo mode from MTU assigned in
10000          * ether_setup() via the alloc_etherdev() call
10001          */
10002         if (tp->dev->mtu > ETH_DATA_LEN &&
10003             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10004                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10005
10006         /* Determine WakeOnLan speed to use. */
10007         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10008             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10009             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10010             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10011                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10012         } else {
10013                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10014         }
10015
10016         /* A few boards don't want Ethernet@WireSpeed phy feature */
10017         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10018             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10019              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10020              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10021             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10022                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10023
10024         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10025             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10026                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10027         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10028                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10029
10030         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
10031             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787))
10032                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10033
10034         tp->coalesce_mode = 0;
10035         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10036             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10037                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10038
10039         /* Initialize MAC MI mode, polling disabled. */
10040         tw32_f(MAC_MI_MODE, tp->mi_mode);
10041         udelay(80);
10042
10043         /* Initialize data/descriptor byte/word swapping. */
10044         val = tr32(GRC_MODE);
10045         val &= GRC_MODE_HOST_STACKUP;
10046         tw32(GRC_MODE, val | tp->grc_mode);
10047
10048         tg3_switch_clocks(tp);
10049
10050         /* Clear this out for sanity. */
10051         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10052
10053         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10054                               &pci_state_reg);
10055         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10056             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10057                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10058
10059                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10060                     chiprevid == CHIPREV_ID_5701_B0 ||
10061                     chiprevid == CHIPREV_ID_5701_B2 ||
10062                     chiprevid == CHIPREV_ID_5701_B5) {
10063                         void __iomem *sram_base;
10064
10065                         /* Write some dummy words into the SRAM status block
10066                          * area, see if it reads back correctly.  If the return
10067                          * value is bad, force enable the PCIX workaround.
10068                          */
10069                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10070
10071                         writel(0x00000000, sram_base);
10072                         writel(0x00000000, sram_base + 4);
10073                         writel(0xffffffff, sram_base + 4);
10074                         if (readl(sram_base) != 0x00000000)
10075                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10076                 }
10077         }
10078
10079         udelay(50);
10080         tg3_nvram_init(tp);
10081
10082         grc_misc_cfg = tr32(GRC_MISC_CFG);
10083         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10084
10085         /* Broadcom's driver says that CIOBE multisplit has a bug */
10086 #if 0
10087         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10088             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10089                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10090                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10091         }
10092 #endif
10093         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10094             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10095              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10096                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10097
10098         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10099             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10100                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10101         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10102                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10103                                       HOSTCC_MODE_CLRTICK_TXBD);
10104
10105                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10106                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10107                                        tp->misc_host_ctrl);
10108         }
10109
10110         /* these are limited to 10/100 only */
10111         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10112              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10113             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10114              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10115              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10116               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10117               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10118             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10119              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10120               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10121                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10122
10123         err = tg3_phy_probe(tp);
10124         if (err) {
10125                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10126                        pci_name(tp->pdev), err);
10127                 /* ... but do not return immediately ... */
10128         }
10129
10130         tg3_read_partno(tp);
10131
10132         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10133                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10134         } else {
10135                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10136                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10137                 else
10138                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10139         }
10140
10141         /* 5700 {AX,BX} chips have a broken status block link
10142          * change bit implementation, so we must use the
10143          * status register in those cases.
10144          */
10145         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10146                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10147         else
10148                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10149
10150         /* The led_ctrl is set during tg3_phy_probe; here we might
10151          * have to force the link status polling mechanism based
10152          * upon subsystem IDs.
10153          */
10154         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10155             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10156                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10157                                   TG3_FLAG_USE_LINKCHG_REG);
10158         }
10159
10160         /* For all SERDES we poll the MAC status register. */
10161         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10162                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10163         else
10164                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10165
10166         /* It seems all chips can get confused if TX buffers
10167          * straddle the 4GB address boundary in some cases.
10168          */
10169         tp->dev->hard_start_xmit = tg3_start_xmit;
10170
10171         tp->rx_offset = 2;
10172         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10173             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10174                 tp->rx_offset = 0;
10175
10176         /* By default, disable wake-on-lan.  User can change this
10177          * using ETHTOOL_SWOL.
10178          */
10179         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10180
10181         return err;
10182 }
10183
10184 #ifdef CONFIG_SPARC64
10185 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10186 {
10187         struct net_device *dev = tp->dev;
10188         struct pci_dev *pdev = tp->pdev;
10189         struct pcidev_cookie *pcp = pdev->sysdata;
10190
10191         if (pcp != NULL) {
10192                 int node = pcp->prom_node;
10193
10194                 if (prom_getproplen(node, "local-mac-address") == 6) {
10195                         prom_getproperty(node, "local-mac-address",
10196                                          dev->dev_addr, 6);
10197                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10198                         return 0;
10199                 }
10200         }
10201         return -ENODEV;
10202 }
10203
10204 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10205 {
10206         struct net_device *dev = tp->dev;
10207
10208         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10209         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10210         return 0;
10211 }
10212 #endif
10213
10214 static int __devinit tg3_get_device_address(struct tg3 *tp)
10215 {
10216         struct net_device *dev = tp->dev;
10217         u32 hi, lo, mac_offset;
10218
10219 #ifdef CONFIG_SPARC64
10220         if (!tg3_get_macaddr_sparc(tp))
10221                 return 0;
10222 #endif
10223
10224         mac_offset = 0x7c;
10225         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10226              !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) ||
10227             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10228                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10229                         mac_offset = 0xcc;
10230                 if (tg3_nvram_lock(tp))
10231                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10232                 else
10233                         tg3_nvram_unlock(tp);
10234         }
10235
10236         /* First try to get it from MAC address mailbox. */
10237         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
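        /* 0x484b is ASCII 'H','K'; the mailbox address is used only when
         * this signature is present in the upper 16 bits of the high word.
         */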
10238         if ((hi >> 16) == 0x484b) {
10239                 dev->dev_addr[0] = (hi >>  8) & 0xff;
10240                 dev->dev_addr[1] = (hi >>  0) & 0xff;
10241
10242                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10243                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10244                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10245                 dev->dev_addr[4] = (lo >>  8) & 0xff;
10246                 dev->dev_addr[5] = (lo >>  0) & 0xff;
10247         }
10248         /* Next, try NVRAM. */
10249         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
10250                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10251                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10252                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
10253                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
10254                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
10255                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
10256                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
10257                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
10258         }
10259         /* Finally just fetch it out of the MAC control regs. */
10260         else {
10261                 hi = tr32(MAC_ADDR_0_HIGH);
10262                 lo = tr32(MAC_ADDR_0_LOW);
10263
10264                 dev->dev_addr[5] = lo & 0xff;
10265                 dev->dev_addr[4] = (lo >> 8) & 0xff;
10266                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10267                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10268                 dev->dev_addr[1] = hi & 0xff;
10269                 dev->dev_addr[0] = (hi >> 8) & 0xff;
10270         }
10271
10272         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10273 #ifdef CONFIG_SPARC64
10274                 if (!tg3_get_default_macaddr_sparc(tp))
10275                         return 0;
10276 #endif
10277                 return -EINVAL;
10278         }
10279         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10280         return 0;
10281 }
10282
10283 #define BOUNDARY_SINGLE_CACHELINE       1
10284 #define BOUNDARY_MULTI_CACHELINE        2
10285
10286 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10287 {
10288         int cacheline_size;
10289         u8 byte;
10290         int goal;
10291
10292         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10293         if (byte == 0)
10294                 cacheline_size = 1024;
10295         else
10296                 cacheline_size = (int) byte * 4;
10297
10298         /* On 5703 and later chips, the boundary bits have no
10299          * effect.
10300          */
10301         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10302             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10303             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10304                 goto out;
10305
10306 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10307         goal = BOUNDARY_MULTI_CACHELINE;
10308 #else
10309 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10310         goal = BOUNDARY_SINGLE_CACHELINE;
10311 #else
10312         goal = 0;
10313 #endif
10314 #endif
10315
10316         if (!goal)
10317                 goto out;
10318
10319         /* PCI controllers on most RISC systems tend to disconnect
10320          * when a device tries to burst across a cache-line boundary.
10321          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10322          *
10323          * Unfortunately, for PCI-E there are only limited
10324          * write-side controls for this, and thus for reads
10325          * we will still get the disconnects.  We'll also waste
10326          * these PCI cycles for both read and write for chips
10327          * other than 5700 and 5701 which do not implement the
10328          * boundary bits.
10329          */
10330         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10331             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10332                 switch (cacheline_size) {
10333                 case 16:
10334                 case 32:
10335                 case 64:
10336                 case 128:
10337                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10338                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10339                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10340                         } else {
10341                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10342                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10343                         }
10344                         break;
10345
10346                 case 256:
10347                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10348                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10349                         break;
10350
10351                 default:
10352                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10353                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10354                         break;
10355                 }
10356         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10357                 switch (cacheline_size) {
10358                 case 16:
10359                 case 32:
10360                 case 64:
10361                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10362                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10363                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10364                                 break;
10365                         }
10366                         /* fallthrough */
10367                 case 128:
10368                 default:
10369                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10370                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10371                         break;
10372                 }
10373         } else {
10374                 switch (cacheline_size) {
10375                 case 16:
10376                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10377                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10378                                         DMA_RWCTRL_WRITE_BNDRY_16);
10379                                 break;
10380                         }
10381                         /* fallthrough */
10382                 case 32:
10383                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10384                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10385                                         DMA_RWCTRL_WRITE_BNDRY_32);
10386                                 break;
10387                         }
10388                         /* fallthrough */
10389                 case 64:
10390                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10391                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10392                                         DMA_RWCTRL_WRITE_BNDRY_64);
10393                                 break;
10394                         }
10395                         /* fallthrough */
10396                 case 128:
10397                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10398                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10399                                         DMA_RWCTRL_WRITE_BNDRY_128);
10400                                 break;
10401                         }
10402                         /* fallthrough */
10403                 case 256:
10404                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10405                                 DMA_RWCTRL_WRITE_BNDRY_256);
10406                         break;
10407                 case 512:
10408                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10409                                 DMA_RWCTRL_WRITE_BNDRY_512);
10410                         break;
10411                 case 1024:
10412                 default:
10413                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10414                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10415                         break;
10416         }
10417         }
10418
10419 out:
10420         return val;
10421 }
10422
10423 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10424 {
10425         struct tg3_internal_buffer_desc test_desc;
10426         u32 sram_dma_descs;
10427         int i, ret;
10428
10429         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10430
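              /* Put the completion FIFO pointers, DMA engine status, buffer
               * manager mode and FTQ reset register into a known (zeroed)
               * state before running the test.
               */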
10431         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10432         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10433         tw32(RDMAC_STATUS, 0);
10434         tw32(WDMAC_STATUS, 0);
10435
10436         tw32(BUFMGR_MODE, 0);
10437         tw32(FTQ_RESET, 0);
10438
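              /* Build a single internal buffer descriptor that points at the
               * host test buffer; it is copied into NIC SRAM below.
               */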
10439         test_desc.addr_hi = ((u64) buf_dma) >> 32;
10440         test_desc.addr_lo = buf_dma & 0xffffffff;
10441         test_desc.nic_mbuf = 0x00002100;
10442         test_desc.len = size;
10443
10444         /*
10445          * The HP ZX1 was seeing test failures on 5701 cards running at 33MHz
10446          * the *second* time the tg3 driver was loaded after an
10447          * initial scan.
10448          *
10449          * Broadcom tells me:
10450          *   ...the DMA engine is connected to the GRC block and a DMA
10451          *   reset may affect the GRC block in some unpredictable way...
10452          *   The behavior of resets to individual blocks has not been tested.
10453          *
10454          * Broadcom noted the GRC reset will also reset all sub-components.
10455          */
10456         if (to_device) {
10457                 test_desc.cqid_sqid = (13 << 8) | 2;
10458
10459                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10460                 udelay(40);
10461         } else {
10462                 test_desc.cqid_sqid = (16 << 8) | 7;
10463
10464                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10465                 udelay(40);
10466         }
10467         test_desc.flags = 0x00000005;
10468
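              /* Copy the descriptor into NIC SRAM one 32-bit word at a time
               * through the PCI memory window configuration registers.
               */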
10469         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10470                 u32 val;
10471
10472                 val = *(((u32 *)&test_desc) + i);
10473                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10474                                        sram_dma_descs + (i * sizeof(u32)));
10475                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10476         }
10477         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10478
10479         if (to_device) {
10480                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10481         } else {
10482                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10483         }
10484
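              /* Poll the matching completion FIFO for up to 40 * 100us for
               * the descriptor to complete; fail with -ENODEV on timeout.
               */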
10485         ret = -ENODEV;
10486         for (i = 0; i < 40; i++) {
10487                 u32 val;
10488
10489                 if (to_device)
10490                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10491                 else
10492                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10493                 if ((val & 0xffff) == sram_dma_descs) {
10494                         ret = 0;
10495                         break;
10496                 }
10497
10498                 udelay(100);
10499         }
10500
10501         return ret;
10502 }
10503
10504 #define TEST_BUFFER_SIZE        0x2000
10505
10506 static int __devinit tg3_test_dma(struct tg3 *tp)
10507 {
10508         dma_addr_t buf_dma;
10509         u32 *buf, saved_dma_rwctrl;
10510         int ret;
10511
10512         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10513         if (!buf) {
10514                 ret = -ENOMEM;
10515                 goto out_nofree;
10516         }
10517
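              /* Seed dma_rwctrl with the PCI write (0x7) and read (0x6)
               * command codes, then let tg3_calc_dma_bndry() fill in any
               * DMA boundary bits appropriate for this host.
               */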
10518         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10519                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10520
10521         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
10522
10523         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10524                 /* DMA read watermark not used on PCIE */
10525                 tp->dma_rwctrl |= 0x00180000;
10526         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10527                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10528                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10529                         tp->dma_rwctrl |= 0x003f0000;
10530                 else
10531                         tp->dma_rwctrl |= 0x003f000f;
10532         } else {
10533                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10534                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10535                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10536
10537                         /* If the 5704 is behind the EPB bridge, we can
10538                          * do the less restrictive ONE_DMA workaround for
10539                          * better performance.
10540                          */
10541                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
10542                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10543                                 tp->dma_rwctrl |= 0x8000;
10544                         else if (ccval == 0x6 || ccval == 0x7)
10545                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10546
10547                         /* Set bit 23 to enable PCIX hw bug fix */
10548                         tp->dma_rwctrl |= 0x009f0000;
10549                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10550                         /* 5780 always in PCIX mode */
10551                         tp->dma_rwctrl |= 0x00144000;
10552                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10553                         /* 5714 always in PCIX mode */
10554                         tp->dma_rwctrl |= 0x00148000;
10555                 } else {
10556                         tp->dma_rwctrl |= 0x001b000f;
10557                 }
10558         }
10559
10560         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10561             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10562                 tp->dma_rwctrl &= 0xfffffff0;
10563
10564         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10565             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10566                 /* Remove this if it causes problems for some boards. */
10567                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10568
10569                 /* On 5700/5701 chips, we need to set this bit.
10570                  * Otherwise the chip will issue cacheline transactions
10571                  * to streamable DMA memory without all of the byte
10572                  * enables asserted.  This is an error on several
10573                  * RISC PCI controllers, in particular sparc64.
10574                  *
10575                  * On 5703/5704 chips, this bit has been reassigned
10576                  * a different meaning.  In particular, it is used
10577                  * on those chips to enable a PCI-X workaround.
10578                  */
10579                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10580         }
10581
10582         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10583
10584 #if 0
10585         /* Unneeded, already done by tg3_get_invariants.  */
10586         tg3_switch_clocks(tp);
10587 #endif
10588
10589         ret = 0;
10590         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10591             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10592                 goto out;
10593
10594         /* It is best to perform DMA test with maximum write burst size
10595          * to expose the 5700/5701 write DMA bug.
10596          */
10597         saved_dma_rwctrl = tp->dma_rwctrl;
10598         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10599         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10600
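              /* Repeatedly DMA a known pattern to the chip and read it back,
               * dropping to a 16-byte write boundary if corruption is seen.
               */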
10601         while (1) {
10602                 u32 *p = buf, i;
10603
10604                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10605                         p[i] = i;
10606
10607                 /* Send the buffer to the chip. */
10608                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10609                 if (ret) {
10610                         printk(KERN_ERR "tg3_test_dma(): write to the buffer failed (%d)\n", ret);
10611                         break;
10612                 }
10613
10614 #if 0
10615                 /* validate data reached card RAM correctly. */
10616                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10617                         u32 val;
10618                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10619                         if (le32_to_cpu(val) != p[i]) {
10620                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
10621                                 /* ret = -ENODEV here? */
10622                         }
10623                         p[i] = 0;
10624                 }
10625 #endif
10626                 /* Now read it back. */
10627                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10628                 if (ret) {
10629                         printk(KERN_ERR "tg3_test_dma(): read from the buffer failed (%d)\n", ret);
10630
10631                         break;
10632                 }
10633
10634                 /* Verify it. */
10635                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10636                         if (p[i] == i)
10637                                 continue;
10638
10639                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10640                             DMA_RWCTRL_WRITE_BNDRY_16) {
10641                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10642                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10643                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10644                                 break;
10645                         } else {
10646                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10647                                 ret = -ENODEV;
10648                                 goto out;
10649                         }
10650                 }
10651
10652                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10653                         /* Success. */
10654                         ret = 0;
10655                         break;
10656                 }
10657         }
10658         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10659             DMA_RWCTRL_WRITE_BNDRY_16) {
10660                 static struct pci_device_id dma_wait_state_chipsets[] = {
10661                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10662                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10663                         { },
10664                 };
10665
10666                 /* DMA test passed without adjusting the DMA boundary;
10667                  * now look for chipsets that are known to expose the
10668                  * DMA bug without failing the test.
10669                  */
10670                 if (pci_dev_present(dma_wait_state_chipsets)) {
10671                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10672                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10673                 }
10674                 else
10675                         /* Safe to use the calculated DMA boundary. */
10676                         tp->dma_rwctrl = saved_dma_rwctrl;
10677
10678                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10679         }
10680
10681 out:
10682         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
10683 out_nofree:
10684         return ret;
10685 }
10686
10687 static void __devinit tg3_init_link_config(struct tg3 *tp)
10688 {
10689         tp->link_config.advertising =
10690                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10691                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10692                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
10693                  ADVERTISED_Autoneg | ADVERTISED_MII);
10694         tp->link_config.speed = SPEED_INVALID;
10695         tp->link_config.duplex = DUPLEX_INVALID;
10696         tp->link_config.autoneg = AUTONEG_ENABLE;
10697         netif_carrier_off(tp->dev);
10698         tp->link_config.active_speed = SPEED_INVALID;
10699         tp->link_config.active_duplex = DUPLEX_INVALID;
10700         tp->link_config.phy_is_low_power = 0;
10701         tp->link_config.orig_speed = SPEED_INVALID;
10702         tp->link_config.orig_duplex = DUPLEX_INVALID;
10703         tp->link_config.orig_autoneg = AUTONEG_INVALID;
10704 }
10705
10706 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
10707 {
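              /* 5705-and-later chips use the reduced 5705/5780 buffer
               * manager watermarks; older chips use the original defaults.
               */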
10708         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10709                 tp->bufmgr_config.mbuf_read_dma_low_water =
10710                         DEFAULT_MB_RDMA_LOW_WATER_5705;
10711                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10712                         DEFAULT_MB_MACRX_LOW_WATER_5705;
10713                 tp->bufmgr_config.mbuf_high_water =
10714                         DEFAULT_MB_HIGH_WATER_5705;
10715
10716                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10717                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
10718                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10719                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
10720                 tp->bufmgr_config.mbuf_high_water_jumbo =
10721                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
10722         } else {
10723                 tp->bufmgr_config.mbuf_read_dma_low_water =
10724                         DEFAULT_MB_RDMA_LOW_WATER;
10725                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10726                         DEFAULT_MB_MACRX_LOW_WATER;
10727                 tp->bufmgr_config.mbuf_high_water =
10728                         DEFAULT_MB_HIGH_WATER;
10729
10730                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10731                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
10732                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10733                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
10734                 tp->bufmgr_config.mbuf_high_water_jumbo =
10735                         DEFAULT_MB_HIGH_WATER_JUMBO;
10736         }
10737
10738         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
10739         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
10740 }
10741
10742 static char * __devinit tg3_phy_string(struct tg3 *tp)
10743 {
10744         switch (tp->phy_id & PHY_ID_MASK) {
10745         case PHY_ID_BCM5400:    return "5400";
10746         case PHY_ID_BCM5401:    return "5401";
10747         case PHY_ID_BCM5411:    return "5411";
10748         case PHY_ID_BCM5701:    return "5701";
10749         case PHY_ID_BCM5703:    return "5703";
10750         case PHY_ID_BCM5704:    return "5704";
10751         case PHY_ID_BCM5705:    return "5705";
10752         case PHY_ID_BCM5750:    return "5750";
10753         case PHY_ID_BCM5752:    return "5752";
10754         case PHY_ID_BCM5714:    return "5714";
10755         case PHY_ID_BCM5780:    return "5780";
10756         case PHY_ID_BCM5787:    return "5787";
10757         case PHY_ID_BCM8002:    return "8002/serdes";
10758         case 0:                 return "serdes";
10759         default:                return "unknown";
10760         }
10761 }
10762
10763 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
10764 {
10765         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10766                 strcpy(str, "PCI Express");
10767                 return str;
10768         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
10769                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
10770
10771                 strcpy(str, "PCIX:");
10772
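                      /* Map the low CLOCK_CTRL bits to the PCI-X bus speed;
                       * a 5704 CIOBE board is always reported as 133MHz.
                       */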
10773                 if ((clock_ctrl == 7) ||
10774                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
10775                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
10776                         strcat(str, "133MHz");
10777                 else if (clock_ctrl == 0)
10778                         strcat(str, "33MHz");
10779                 else if (clock_ctrl == 2)
10780                         strcat(str, "50MHz");
10781                 else if (clock_ctrl == 4)
10782                         strcat(str, "66MHz");
10783                 else if (clock_ctrl == 6)
10784                         strcat(str, "100MHz");
10785         } else {
10786                 strcpy(str, "PCI:");
10787                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
10788                         strcat(str, "66MHz");
10789                 else
10790                         strcat(str, "33MHz");
10791         }
10792         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
10793                 strcat(str, ":32-bit");
10794         else
10795                 strcat(str, ":64-bit");
10796         return str;
10797 }
10798
10799 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
10800 {
10801         struct pci_dev *peer;
10802         unsigned int func, devnr = tp->pdev->devfn & ~7;
10803
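              /* Scan the other functions in the same PCI slot for the second
               * port of a dual-port device.
               */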
10804         for (func = 0; func < 8; func++) {
10805                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
10806                 if (peer && peer != tp->pdev)
10807                         break;
10808                 pci_dev_put(peer);
10809         }
10810         /* The 5704 can be configured in single-port mode; set peer to
10811          * tp->pdev in that case.
10812          */
10813         if (!peer) {
10814                 peer = tp->pdev;
10815                 return peer;
10816         }
10817
10818         /*
10819          * We don't need to keep the refcount elevated; there's no way
10820          * to remove one half of this device without removing the other.
10821          */
10822         pci_dev_put(peer);
10823
10824         return peer;
10825 }
10826
10827 static void __devinit tg3_init_coal(struct tg3 *tp)
10828 {
10829         struct ethtool_coalesce *ec = &tp->coal;
10830
10831         memset(ec, 0, sizeof(*ec));
10832         ec->cmd = ETHTOOL_GCOALESCE;
10833         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
10834         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
10835         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
10836         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
10837         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
10838         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
10839         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
10840         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
10841         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
10842
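              /* When clear-tick coalescing is enabled, use the CLRTCKS
               * variants of the tick defaults.
               */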
10843         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
10844                                  HOSTCC_MODE_CLRTICK_TXBD)) {
10845                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
10846                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
10847                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
10848                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
10849         }
10850
10851         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10852                 ec->rx_coalesce_usecs_irq = 0;
10853                 ec->tx_coalesce_usecs_irq = 0;
10854                 ec->stats_block_coalesce_usecs = 0;
10855         }
10856 }
10857
10858 static int __devinit tg3_init_one(struct pci_dev *pdev,
10859                                   const struct pci_device_id *ent)
10860 {
10861         static int tg3_version_printed;
10862         unsigned long tg3reg_base, tg3reg_len;
10863         struct net_device *dev;
10864         struct tg3 *tp;
10865         int i, err, pm_cap;
10866         char str[40];
10867         u64 dma_mask, persist_dma_mask;
10868
10869         if (tg3_version_printed++ == 0)
10870                 printk(KERN_INFO "%s", version);
10871
10872         err = pci_enable_device(pdev);
10873         if (err) {
10874                 printk(KERN_ERR PFX "Cannot enable PCI device, "
10875                        "aborting.\n");
10876                 return err;
10877         }
10878
10879         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10880                 printk(KERN_ERR PFX "Cannot find proper PCI device "
10881                        "base address, aborting.\n");
10882                 err = -ENODEV;
10883                 goto err_out_disable_pdev;
10884         }
10885
10886         err = pci_request_regions(pdev, DRV_MODULE_NAME);
10887         if (err) {
10888                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
10889                        "aborting.\n");
10890                 goto err_out_disable_pdev;
10891         }
10892
10893         pci_set_master(pdev);
10894
10895         /* Find power-management capability. */
10896         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10897         if (pm_cap == 0) {
10898                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
10899                        "aborting.\n");
10900                 err = -EIO;
10901                 goto err_out_free_res;
10902         }
10903
10904         tg3reg_base = pci_resource_start(pdev, 0);
10905         tg3reg_len = pci_resource_len(pdev, 0);
10906
10907         dev = alloc_etherdev(sizeof(*tp));
10908         if (!dev) {
10909                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
10910                 err = -ENOMEM;
10911                 goto err_out_free_res;
10912         }
10913
10914         SET_MODULE_OWNER(dev);
10915         SET_NETDEV_DEV(dev, &pdev->dev);
10916
10917         dev->features |= NETIF_F_LLTX;
10918 #if TG3_VLAN_TAG_USED
10919         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
10920         dev->vlan_rx_register = tg3_vlan_rx_register;
10921         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
10922 #endif
10923
10924         tp = netdev_priv(dev);
10925         tp->pdev = pdev;
10926         tp->dev = dev;
10927         tp->pm_cap = pm_cap;
10928         tp->mac_mode = TG3_DEF_MAC_MODE;
10929         tp->rx_mode = TG3_DEF_RX_MODE;
10930         tp->tx_mode = TG3_DEF_TX_MODE;
10931         tp->mi_mode = MAC_MI_MODE_BASE;
10932         if (tg3_debug > 0)
10933                 tp->msg_enable = tg3_debug;
10934         else
10935                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
10936
10937         /* The word/byte swap controls here control register access byte
10938          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
10939          * setting below.
10940          */
10941         tp->misc_host_ctrl =
10942                 MISC_HOST_CTRL_MASK_PCI_INT |
10943                 MISC_HOST_CTRL_WORD_SWAP |
10944                 MISC_HOST_CTRL_INDIR_ACCESS |
10945                 MISC_HOST_CTRL_PCISTATE_RW;
10946
10947         /* The NONFRM (non-frame) byte/word swap controls take effect
10948          * on descriptor entries, anything which isn't packet data.
10949          *
10950          * The StrongARM chips on the board (one for tx, one for rx)
10951          * are running in big-endian mode.
10952          */
10953         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
10954                         GRC_MODE_WSWAP_NONFRM_DATA);
10955 #ifdef __BIG_ENDIAN
10956         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
10957 #endif
10958         spin_lock_init(&tp->lock);
10959         spin_lock_init(&tp->tx_lock);
10960         spin_lock_init(&tp->indirect_lock);
10961         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
10962
10963         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
10964         if (!tp->regs) {
10965                 printk(KERN_ERR PFX "Cannot map device registers, "
10966                        "aborting.\n");
10967                 err = -ENOMEM;
10968                 goto err_out_free_dev;
10969         }
10970
10971         tg3_init_link_config(tp);
10972
10973         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
10974         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
10975         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
10976
10977         dev->open = tg3_open;
10978         dev->stop = tg3_close;
10979         dev->get_stats = tg3_get_stats;
10980         dev->set_multicast_list = tg3_set_rx_mode;
10981         dev->set_mac_address = tg3_set_mac_addr;
10982         dev->do_ioctl = tg3_ioctl;
10983         dev->tx_timeout = tg3_tx_timeout;
10984         dev->poll = tg3_poll;
10985         dev->ethtool_ops = &tg3_ethtool_ops;
10986         dev->weight = 64;
10987         dev->watchdog_timeo = TG3_TX_TIMEOUT;
10988         dev->change_mtu = tg3_change_mtu;
10989         dev->irq = pdev->irq;
10990 #ifdef CONFIG_NET_POLL_CONTROLLER
10991         dev->poll_controller = tg3_poll_controller;
10992 #endif
10993
10994         err = tg3_get_invariants(tp);
10995         if (err) {
10996                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
10997                        "aborting.\n");
10998                 goto err_out_iounmap;
10999         }
11000
11001         /* The EPB bridge inside 5714, 5715, and 5780 and any
11002          * device behind the EPB cannot support DMA addresses > 40-bit.
11003          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11004          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11005          * do DMA address check in tg3_start_xmit().
11006          */
11007         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11008                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11009         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11010                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11011 #ifdef CONFIG_HIGHMEM
11012                 dma_mask = DMA_64BIT_MASK;
11013 #endif
11014         } else
11015                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11016
11017         /* Configure DMA attributes. */
11018         if (dma_mask > DMA_32BIT_MASK) {
11019                 err = pci_set_dma_mask(pdev, dma_mask);
11020                 if (!err) {
11021                         dev->features |= NETIF_F_HIGHDMA;
11022                         err = pci_set_consistent_dma_mask(pdev,
11023                                                           persist_dma_mask);
11024                         if (err < 0) {
11025                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11026                                        "DMA for consistent allocations\n");
11027                                 goto err_out_iounmap;
11028                         }
11029                 }
11030         }
11031         if (err || dma_mask == DMA_32BIT_MASK) {
11032                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11033                 if (err) {
11034                         printk(KERN_ERR PFX "No usable DMA configuration, "
11035                                "aborting.\n");
11036                         goto err_out_iounmap;
11037                 }
11038         }
11039
11040         tg3_init_bufmgr_config(tp);
11041
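              /* Chips with hardware TSO are always TSO capable.  The 5700,
               * 5701, 5705 A0 and ASF-enabled devices are not; everything
               * else can use firmware TSO.
               */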
11042 #if TG3_TSO_SUPPORT != 0
11043         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11044                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11045         }
11046         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11047             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11048             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11049             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11050                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11051         } else {
11052                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11053         }
11054
11055         /* TSO is on by default on chips that support hardware TSO.
11056          * Firmware TSO on older chips gives lower performance, so it
11057          * is off by default, but can be enabled using ethtool.
11058          */
11059         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
11060                 dev->features |= NETIF_F_TSO;
11061
11062 #endif
11063
11064         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11065             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11066             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11067                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11068                 tp->rx_pending = 63;
11069         }
11070
11071         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11072             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11073                 tp->pdev_peer = tg3_find_peer(tp);
11074
11075         err = tg3_get_device_address(tp);
11076         if (err) {
11077                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11078                        "aborting.\n");
11079                 goto err_out_iounmap;
11080         }
11081
11082         /*
11083          * Reset the chip in case a UNDI or EFI driver did not shut it
11084          * down cleanly; the DMA self test will enable WDMAC and we'll
11085          * see (spurious) pending DMA on the PCI bus at that point.
11086          */
11087         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11088             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11089                 pci_save_state(tp->pdev);
11090                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11091                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11092         }
11093
11094         err = tg3_test_dma(tp);
11095         if (err) {
11096                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11097                 goto err_out_iounmap;
11098         }
11099
11100         /* Tigon3 can do ipv4 only... and some chips have buggy
11101          * checksumming.
11102          */
11103         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11104                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
11105                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11106         } else
11107                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11108
11109         /* flow control autonegotiation is default behavior */
11110         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11111
11112         tg3_init_coal(tp);
11113
11114         /* Now that we have fully set up the chip, save away a snapshot
11115          * of the PCI config space.  We need to restore this after
11116          * GRC_MISC_CFG core clock resets and some resume events.
11117          */
11118         pci_save_state(tp->pdev);
11119
11120         err = register_netdev(dev);
11121         if (err) {
11122                 printk(KERN_ERR PFX "Cannot register net device, "
11123                        "aborting.\n");
11124                 goto err_out_iounmap;
11125         }
11126
11127         pci_set_drvdata(pdev, dev);
11128
11129         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11130                dev->name,
11131                tp->board_part_number,
11132                tp->pci_chip_rev_id,
11133                tg3_phy_string(tp),
11134                tg3_bus_string(tp, str),
11135                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11136
11137         for (i = 0; i < 6; i++)
11138                 printk("%2.2x%c", dev->dev_addr[i],
11139                        i == 5 ? '\n' : ':');
11140
11141         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11142                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11143                "TSOcap[%d]\n",
11144                dev->name,
11145                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11146                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11147                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11148                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11149                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11150                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11151                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11152         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11153                dev->name, tp->dma_rwctrl,
11154                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11155                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
11156
11157         return 0;
11158
11159 err_out_iounmap:
11160         if (tp->regs) {
11161                 iounmap(tp->regs);
11162                 tp->regs = NULL;
11163         }
11164
11165 err_out_free_dev:
11166         free_netdev(dev);
11167
11168 err_out_free_res:
11169         pci_release_regions(pdev);
11170
11171 err_out_disable_pdev:
11172         pci_disable_device(pdev);
11173         pci_set_drvdata(pdev, NULL);
11174         return err;
11175 }
11176
11177 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11178 {
11179         struct net_device *dev = pci_get_drvdata(pdev);
11180
11181         if (dev) {
11182                 struct tg3 *tp = netdev_priv(dev);
11183
11184                 flush_scheduled_work();
11185                 unregister_netdev(dev);
11186                 if (tp->regs) {
11187                         iounmap(tp->regs);
11188                         tp->regs = NULL;
11189                 }
11190                 free_netdev(dev);
11191                 pci_release_regions(pdev);
11192                 pci_disable_device(pdev);
11193                 pci_set_drvdata(pdev, NULL);
11194         }
11195 }
11196
11197 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11198 {
11199         struct net_device *dev = pci_get_drvdata(pdev);
11200         struct tg3 *tp = netdev_priv(dev);
11201         int err;
11202
11203         if (!netif_running(dev))
11204                 return 0;
11205
11206         flush_scheduled_work();
11207         tg3_netif_stop(tp);
11208
11209         del_timer_sync(&tp->timer);
11210
11211         tg3_full_lock(tp, 1);
11212         tg3_disable_ints(tp);
11213         tg3_full_unlock(tp);
11214
11215         netif_device_detach(dev);
11216
11217         tg3_full_lock(tp, 0);
11218         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11219         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
11220         tg3_full_unlock(tp);
11221
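              /* Enter the requested low-power state.  If that fails, bring
               * the device back up as if the suspend had never happened.
               */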
11222         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
11223         if (err) {
11224                 tg3_full_lock(tp, 0);
11225
11226                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11227                 tg3_init_hw(tp);
11228
11229                 tp->timer.expires = jiffies + tp->timer_offset;
11230                 add_timer(&tp->timer);
11231
11232                 netif_device_attach(dev);
11233                 tg3_netif_start(tp);
11234
11235                 tg3_full_unlock(tp);
11236         }
11237
11238         return err;
11239 }
11240
11241 static int tg3_resume(struct pci_dev *pdev)
11242 {
11243         struct net_device *dev = pci_get_drvdata(pdev);
11244         struct tg3 *tp = netdev_priv(dev);
11245         int err;
11246
11247         if (!netif_running(dev))
11248                 return 0;
11249
11250         pci_restore_state(tp->pdev);
11251
11252         err = tg3_set_power_state(tp, PCI_D0);
11253         if (err)
11254                 return err;
11255
11256         netif_device_attach(dev);
11257
11258         tg3_full_lock(tp, 0);
11259
11260         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11261         tg3_init_hw(tp);
11262
11263         tp->timer.expires = jiffies + tp->timer_offset;
11264         add_timer(&tp->timer);
11265
11266         tg3_netif_start(tp);
11267
11268         tg3_full_unlock(tp);
11269
11270         return 0;
11271 }
11272
11273 static struct pci_driver tg3_driver = {
11274         .name           = DRV_MODULE_NAME,
11275         .id_table       = tg3_pci_tbl,
11276         .probe          = tg3_init_one,
11277         .remove         = __devexit_p(tg3_remove_one),
11278         .suspend        = tg3_suspend,
11279         .resume         = tg3_resume
11280 };
11281
11282 static int __init tg3_init(void)
11283 {
11284         return pci_module_init(&tg3_driver);
11285 }
11286
11287 static void __exit tg3_cleanup(void)
11288 {
11289         pci_unregister_driver(&tg3_driver);
11290 }
11291
11292 module_init(tg3_init);
11293 module_exit(tg3_cleanup);