[TG3]: Add new hard_start_xmit
[pandora-kernel.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mii.h>
36 #include <linux/if_vlan.h>
37 #include <linux/ip.h>
38 #include <linux/tcp.h>
39 #include <linux/workqueue.h>
40 #include <linux/prefetch.h>
41 #include <linux/dma-mapping.h>
42
43 #include <net/checksum.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC64
51 #include <asm/idprom.h>
52 #include <asm/oplib.h>
53 #include <asm/pbm.h>
54 #endif
55
56 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
57 #define TG3_VLAN_TAG_USED 1
58 #else
59 #define TG3_VLAN_TAG_USED 0
60 #endif
61
62 #ifdef NETIF_F_TSO
63 #define TG3_TSO_SUPPORT 1
64 #else
65 #define TG3_TSO_SUPPORT 0
66 #endif
67
68 #include "tg3.h"
69
70 #define DRV_MODULE_NAME         "tg3"
71 #define PFX DRV_MODULE_NAME     ": "
72 #define DRV_MODULE_VERSION      "3.51"
73 #define DRV_MODULE_RELDATE      "Feb 21, 2006"
74
75 #define TG3_DEF_MAC_MODE        0
76 #define TG3_DEF_RX_MODE         0
77 #define TG3_DEF_TX_MODE         0
78 #define TG3_DEF_MSG_ENABLE        \
79         (NETIF_MSG_DRV          | \
80          NETIF_MSG_PROBE        | \
81          NETIF_MSG_LINK         | \
82          NETIF_MSG_TIMER        | \
83          NETIF_MSG_IFDOWN       | \
84          NETIF_MSG_IFUP         | \
85          NETIF_MSG_RX_ERR       | \
86          NETIF_MSG_TX_ERR)
87
88 /* length of time before we decide the hardware is borked,
89  * and dev->tx_timeout() should be called to fix the problem
90  */
91 #define TG3_TX_TIMEOUT                  (5 * HZ)
92
93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU                     60
95 #define TG3_MAX_MTU(tp) \
96         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97
98 /* These numbers seem to be hard coded in the NIC firmware somehow.
99  * You can't change the ring sizes, but you can change where you place
100  * them in the NIC onboard memory.
101  */
102 #define TG3_RX_RING_SIZE                512
103 #define TG3_DEF_RX_RING_PENDING         200
104 #define TG3_RX_JUMBO_RING_SIZE          256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
106
107 /* Do not place this n-ring entries value into the tp struct itself,
108  * we really want to expose these constants to GCC so that modulo et
109  * al.  operations are done with shifts and masks instead of with
110  * hw multiply/modulo instructions.  Another solution would be to
111  * replace things like '% foo' with '& (foo - 1)'.
112  */
113 #define TG3_RX_RCB_RING_SIZE(tp)        \
114         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
115
116 #define TG3_TX_RING_SIZE                512
117 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
118
119 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
120                                  TG3_RX_RING_SIZE)
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122                                  TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124                                    TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
126                                  TG3_TX_RING_SIZE)
127 #define TX_BUFFS_AVAIL(TP)                                              \
128         ((TP)->tx_pending -                                             \
129          (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
130 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
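/* Worked example of the mask trick described above: because the ring
 * sizes are powers of two, the index arithmetic reduces to an AND with
 * (size - 1) instead of a hardware modulo.  With TG3_TX_RING_SIZE == 512,
 *
 *	NEXT_TX(511) == (511 + 1) & 511 == 0
 *
 * so the producer index wraps back to the start of the ring, and
 * TX_BUFFS_AVAIL() masks the (tx_prod - tx_cons) difference so it stays
 * correct when the producer has wrapped around the ring but the
 * consumer has not.
 */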
131
132 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
134
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
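/* With the 512-entry TX ring this works out to 128 descriptors, i.e.
 * the queue is only woken again once TX_BUFFS_AVAIL() reports at least
 * a quarter of the ring free.
 */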
137
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
141 #define TG3_NUM_TEST            6
142
143 static char version[] __devinitdata =
144         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION);
150
151 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154
155 static struct pci_device_id tg3_pci_tbl[] = {
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
241           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
243           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
245           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
246         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
247           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
248         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
249           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
250         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
251           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
252         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
253           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
254         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
255           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
256         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
257           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
258         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
259           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
260         { 0, }
261 };
262
263 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
264
265 static struct {
266         const char string[ETH_GSTRING_LEN];
267 } ethtool_stats_keys[TG3_NUM_STATS] = {
268         { "rx_octets" },
269         { "rx_fragments" },
270         { "rx_ucast_packets" },
271         { "rx_mcast_packets" },
272         { "rx_bcast_packets" },
273         { "rx_fcs_errors" },
274         { "rx_align_errors" },
275         { "rx_xon_pause_rcvd" },
276         { "rx_xoff_pause_rcvd" },
277         { "rx_mac_ctrl_rcvd" },
278         { "rx_xoff_entered" },
279         { "rx_frame_too_long_errors" },
280         { "rx_jabbers" },
281         { "rx_undersize_packets" },
282         { "rx_in_length_errors" },
283         { "rx_out_length_errors" },
284         { "rx_64_or_less_octet_packets" },
285         { "rx_65_to_127_octet_packets" },
286         { "rx_128_to_255_octet_packets" },
287         { "rx_256_to_511_octet_packets" },
288         { "rx_512_to_1023_octet_packets" },
289         { "rx_1024_to_1522_octet_packets" },
290         { "rx_1523_to_2047_octet_packets" },
291         { "rx_2048_to_4095_octet_packets" },
292         { "rx_4096_to_8191_octet_packets" },
293         { "rx_8192_to_9022_octet_packets" },
294
295         { "tx_octets" },
296         { "tx_collisions" },
297
298         { "tx_xon_sent" },
299         { "tx_xoff_sent" },
300         { "tx_flow_control" },
301         { "tx_mac_errors" },
302         { "tx_single_collisions" },
303         { "tx_mult_collisions" },
304         { "tx_deferred" },
305         { "tx_excessive_collisions" },
306         { "tx_late_collisions" },
307         { "tx_collide_2times" },
308         { "tx_collide_3times" },
309         { "tx_collide_4times" },
310         { "tx_collide_5times" },
311         { "tx_collide_6times" },
312         { "tx_collide_7times" },
313         { "tx_collide_8times" },
314         { "tx_collide_9times" },
315         { "tx_collide_10times" },
316         { "tx_collide_11times" },
317         { "tx_collide_12times" },
318         { "tx_collide_13times" },
319         { "tx_collide_14times" },
320         { "tx_collide_15times" },
321         { "tx_ucast_packets" },
322         { "tx_mcast_packets" },
323         { "tx_bcast_packets" },
324         { "tx_carrier_sense_errors" },
325         { "tx_discards" },
326         { "tx_errors" },
327
328         { "dma_writeq_full" },
329         { "dma_write_prioq_full" },
330         { "rxbds_empty" },
331         { "rx_discards" },
332         { "rx_errors" },
333         { "rx_threshold_hit" },
334
335         { "dma_readq_full" },
336         { "dma_read_prioq_full" },
337         { "tx_comp_queue_full" },
338
339         { "ring_set_send_prod_index" },
340         { "ring_status_update" },
341         { "nic_irqs" },
342         { "nic_avoided_irqs" },
343         { "nic_tx_threshold_hit" }
344 };
345
346 static struct {
347         const char string[ETH_GSTRING_LEN];
348 } ethtool_test_keys[TG3_NUM_TEST] = {
349         { "nvram test     (online) " },
350         { "link test      (online) " },
351         { "register test  (offline)" },
352         { "memory test    (offline)" },
353         { "loopback test  (offline)" },
354         { "interrupt test (offline)" },
355 };
356
357 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
358 {
359         writel(val, tp->regs + off);
360 }
361
362 static u32 tg3_read32(struct tg3 *tp, u32 off)
363 {
364         return readl(tp->regs + off);
365 }
366
367 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
368 {
369         unsigned long flags;
370
371         spin_lock_irqsave(&tp->indirect_lock, flags);
372         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
373         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
374         spin_unlock_irqrestore(&tp->indirect_lock, flags);
375 }
376
377 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
378 {
379         writel(val, tp->regs + off);
380         readl(tp->regs + off);
381 }
382
383 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
384 {
385         unsigned long flags;
386         u32 val;
387
388         spin_lock_irqsave(&tp->indirect_lock, flags);
389         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
390         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
391         spin_unlock_irqrestore(&tp->indirect_lock, flags);
392         return val;
393 }
394
395 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
396 {
397         unsigned long flags;
398
399         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
400                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
401                                        TG3_64BIT_REG_LOW, val);
402                 return;
403         }
404         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
405                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
406                                        TG3_64BIT_REG_LOW, val);
407                 return;
408         }
409
410         spin_lock_irqsave(&tp->indirect_lock, flags);
411         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
412         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
413         spin_unlock_irqrestore(&tp->indirect_lock, flags);
414
415         /* In indirect mode when disabling interrupts, we also need
416          * to clear the interrupt bit in the GRC local ctrl register.
417          */
418         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
419             (val == 0x1)) {
420                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
421                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
422         }
423 }
424
425 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
426 {
427         unsigned long flags;
428         u32 val;
429
430         spin_lock_irqsave(&tp->indirect_lock, flags);
431         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
432         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
433         spin_unlock_irqrestore(&tp->indirect_lock, flags);
434         return val;
435 }
436
437 /* usec_wait specifies the wait time in usec when writing to certain registers
438  * where it is unsafe to read back the register without some delay.
439  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
440  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
441  */
442 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
443 {
444         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
445             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
446                 /* Non-posted methods */
447                 tp->write32(tp, off, val);
448         else {
449                 /* Posted method */
450                 tg3_write32(tp, off, val);
451                 if (usec_wait)
452                         udelay(usec_wait);
453                 tp->read32(tp, off);
454         }
455         /* Wait again after the read for the posted method to guarantee that
456          * the wait time is met.
457          */
458         if (usec_wait)
459                 udelay(usec_wait);
460 }
461
462 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
463 {
464         tp->write32_mbox(tp, off, val);
465         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
466             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
467                 tp->read32_mbox(tp, off);
468 }
469
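/* The TX mailbox write below needs two chip-specific workarounds: when
 * the TXD mailbox hardware bug flag is set the value is written twice,
 * and on systems that may reorder posted writes the mailbox is read
 * back so the doorbell is flushed to the chip.  tw32_mailbox_flush()
 * above applies the same read-back rule to the generic mailbox path.
 */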
470 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
471 {
472         void __iomem *mbox = tp->regs + off;
473         writel(val, mbox);
474         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
475                 writel(val, mbox);
476         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
477                 readl(mbox);
478 }
479
480 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
481 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
482 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
483 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
484 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
485
486 #define tw32(reg,val)           tp->write32(tp, reg, val)
487 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
488 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
489 #define tr32(reg)               tp->read32(tp, reg)
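/* Illustrative use of the flavors above (matching calls made later in
 * this file): register writes that need a settling delay, such as GPIO
 * or clock changes, go through the waiting flavor so the delay is
 * honored even when the write is posted, e.g.
 *
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
 *		    GRC_LCLCTRL_GPIO_OE1, 100);
 *
 * while ordinary register accesses use tw32()/tr32() and tw32_f().
 */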
490
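/* NIC SRAM is reached through a sliding window in PCI config space:
 * TG3PCI_MEM_WIN_BASE_ADDR selects the offset and TG3PCI_MEM_WIN_DATA
 * carries the data, all under indirect_lock.  The window base is put
 * back to zero afterwards so other code can assume it is parked there.
 * tg3_write_mem_fast() skips the window and writes through the
 * memory-mapped NIC_SRAM_WIN_BASE when no access workaround is active.
 */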
491 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
492 {
493         unsigned long flags;
494
495         spin_lock_irqsave(&tp->indirect_lock, flags);
496         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
497         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
498
499         /* Always leave this as zero. */
500         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
501         spin_unlock_irqrestore(&tp->indirect_lock, flags);
502 }
503
504 static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
505 {
506         /* If no workaround is needed, write to mem space directly */
507         if (tp->write32 != tg3_write_indirect_reg32)
508                 tw32(NIC_SRAM_WIN_BASE + off, val);
509         else
510                 tg3_write_mem(tp, off, val);
511 }
512
513 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
514 {
515         unsigned long flags;
516
517         spin_lock_irqsave(&tp->indirect_lock, flags);
518         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
519         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
520
521         /* Always leave this as zero. */
522         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
523         spin_unlock_irqrestore(&tp->indirect_lock, flags);
524 }
525
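/* Interrupts are gated in two places: MISC_HOST_CTRL can mask the PCI
 * interrupt line, and interrupt mailbox 0 controls the chip itself.
 * Writing 0x00000001 to the mailbox stops interrupt generation, while
 * writing last_tag << 24 re-enables it and acknowledges the work
 * covered by that status tag.  tg3_cond_int() forces an interrupt via
 * GRC_LOCAL_CTRL when a status update could otherwise be missed while
 * tagged status is not in use.
 */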
526 static void tg3_disable_ints(struct tg3 *tp)
527 {
528         tw32(TG3PCI_MISC_HOST_CTRL,
529              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
530         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
531 }
532
533 static inline void tg3_cond_int(struct tg3 *tp)
534 {
535         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
536             (tp->hw_status->status & SD_STATUS_UPDATED))
537                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
538 }
539
540 static void tg3_enable_ints(struct tg3 *tp)
541 {
542         tp->irq_sync = 0;
543         wmb();
544
545         tw32(TG3PCI_MISC_HOST_CTRL,
546              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
547         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
548                        (tp->last_tag << 24));
549         tg3_cond_int(tp);
550 }
551
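/* Inspect the shared status block and report whether there is anything
 * for the driver to do: a link-change event (unless link changes are
 * read from a register or the SERDES is polled instead), a TX
 * completion we have not reclaimed yet, or a newly produced RX return
 * ring descriptor.
 */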
552 static inline unsigned int tg3_has_work(struct tg3 *tp)
553 {
554         struct tg3_hw_status *sblk = tp->hw_status;
555         unsigned int work_exists = 0;
556
557         /* check for phy events */
558         if (!(tp->tg3_flags &
559               (TG3_FLAG_USE_LINKCHG_REG |
560                TG3_FLAG_POLL_SERDES))) {
561                 if (sblk->status & SD_STATUS_LINK_CHG)
562                         work_exists = 1;
563         }
564         /* check for RX/TX work to do */
565         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
566             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
567                 work_exists = 1;
568
569         return work_exists;
570 }
571
572 /* tg3_restart_ints
573  *  similar to tg3_enable_ints, but it accurately determines whether there
574  *  is new work pending and can return without flushing the PIO write
575  *  which re-enables interrupts.
576  */
577 static void tg3_restart_ints(struct tg3 *tp)
578 {
579         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
580                      tp->last_tag << 24);
581         mmiowb();
582
583         /* When doing tagged status, this work check is unnecessary.
584          * The last_tag we write above tells the chip which piece of
585          * work we've completed.
586          */
587         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
588             tg3_has_work(tp))
589                 tw32(HOSTCC_MODE, tp->coalesce_mode |
590                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
591 }
592
593 static inline void tg3_netif_stop(struct tg3 *tp)
594 {
595         tp->dev->trans_start = jiffies; /* prevent tx timeout */
596         netif_poll_disable(tp->dev);
597         netif_tx_disable(tp->dev);
598 }
599
600 static inline void tg3_netif_start(struct tg3 *tp)
601 {
602         netif_wake_queue(tp->dev);
603         /* NOTE: unconditional netif_wake_queue is only appropriate
604          * so long as all callers are assured to have free tx slots
605          * (such as after tg3_init_hw)
606          */
607         netif_poll_enable(tp->dev);
608         tp->hw_status->status |= SD_STATUS_UPDATED;
609         tg3_enable_ints(tp);
610 }
611
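/* Switch the core clock configuration.  5780-class chips are left
 * alone.  The CLKRUN bits and low clock-select bits are preserved;
 * 5705+ parts that had the 625 MHz core clock selected keep it, while
 * older parts running from the 44 MHz core clock are stepped through
 * ALTCLK first, presumably so the changeover is glitch free.  Each
 * write uses tw32_wait_f() with a 40 usec delay because clock changes
 * need time to settle (see the comment above _tw32_flush()).
 */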
612 static void tg3_switch_clocks(struct tg3 *tp)
613 {
614         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
615         u32 orig_clock_ctrl;
616
617         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
618                 return;
619
620         orig_clock_ctrl = clock_ctrl;
621         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
622                        CLOCK_CTRL_CLKRUN_OENABLE |
623                        0x1f);
624         tp->pci_clock_ctrl = clock_ctrl;
625
626         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
627                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
628                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
629                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
630                 }
631         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
632                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
633                             clock_ctrl |
634                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
635                             40);
636                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
637                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
638                             40);
639         }
640         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
641 }
642
643 #define PHY_BUSY_LOOPS  5000
644
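/* MII management access goes through the MAC_MI_COM register: the
 * frame packs the PHY address, register number, a READ or WRITE
 * command and the START bit, then MI_COM_BUSY is polled (up to
 * PHY_BUSY_LOOPS iterations, roughly 10 usec apart) until the serial
 * transaction completes.  Hardware auto-polling is temporarily turned
 * off around the access and restored afterwards; -EBUSY is returned
 * on timeout.
 */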
645 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
646 {
647         u32 frame_val;
648         unsigned int loops;
649         int ret;
650
651         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
652                 tw32_f(MAC_MI_MODE,
653                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
654                 udelay(80);
655         }
656
657         *val = 0x0;
658
659         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
660                       MI_COM_PHY_ADDR_MASK);
661         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
662                       MI_COM_REG_ADDR_MASK);
663         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
664         
665         tw32_f(MAC_MI_COM, frame_val);
666
667         loops = PHY_BUSY_LOOPS;
668         while (loops != 0) {
669                 udelay(10);
670                 frame_val = tr32(MAC_MI_COM);
671
672                 if ((frame_val & MI_COM_BUSY) == 0) {
673                         udelay(5);
674                         frame_val = tr32(MAC_MI_COM);
675                         break;
676                 }
677                 loops -= 1;
678         }
679
680         ret = -EBUSY;
681         if (loops != 0) {
682                 *val = frame_val & MI_COM_DATA_MASK;
683                 ret = 0;
684         }
685
686         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
687                 tw32_f(MAC_MI_MODE, tp->mi_mode);
688                 udelay(80);
689         }
690
691         return ret;
692 }
693
694 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
695 {
696         u32 frame_val;
697         unsigned int loops;
698         int ret;
699
700         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
701                 tw32_f(MAC_MI_MODE,
702                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
703                 udelay(80);
704         }
705
706         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
707                       MI_COM_PHY_ADDR_MASK);
708         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
709                       MI_COM_REG_ADDR_MASK);
710         frame_val |= (val & MI_COM_DATA_MASK);
711         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
712         
713         tw32_f(MAC_MI_COM, frame_val);
714
715         loops = PHY_BUSY_LOOPS;
716         while (loops != 0) {
717                 udelay(10);
718                 frame_val = tr32(MAC_MI_COM);
719                 if ((frame_val & MI_COM_BUSY) == 0) {
720                         udelay(5);
721                         frame_val = tr32(MAC_MI_COM);
722                         break;
723                 }
724                 loops -= 1;
725         }
726
727         ret = -EBUSY;
728         if (loops != 0)
729                 ret = 0;
730
731         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
732                 tw32_f(MAC_MI_MODE, tp->mi_mode);
733                 udelay(80);
734         }
735
736         return ret;
737 }
738
739 static void tg3_phy_set_wirespeed(struct tg3 *tp)
740 {
741         u32 val;
742
743         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
744                 return;
745
746         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
747             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
748                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
749                              (val | (1 << 15) | (1 << 4)));
750 }
751
752 static int tg3_bmcr_reset(struct tg3 *tp)
753 {
754         u32 phy_control;
755         int limit, err;
756
757         /* OK, reset it, and poll the BMCR_RESET bit until it
758          * clears or we time out.
759          */
760         phy_control = BMCR_RESET;
761         err = tg3_writephy(tp, MII_BMCR, phy_control);
762         if (err != 0)
763                 return -EBUSY;
764
765         limit = 5000;
766         while (limit--) {
767                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
768                 if (err != 0)
769                         return -EBUSY;
770
771                 if ((phy_control & BMCR_RESET) == 0) {
772                         udelay(40);
773                         break;
774                 }
775                 udelay(10);
776         }
777         if (limit <= 0)
778                 return -EBUSY;
779
780         return 0;
781 }
782
783 static int tg3_wait_macro_done(struct tg3 *tp)
784 {
785         int limit = 100;
786
787         while (limit--) {
788                 u32 tmp32;
789
790                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
791                         if ((tmp32 & 0x1000) == 0)
792                                 break;
793                 }
794         }
795         if (limit <= 0)
796                 return -EBUSY;
797
798         return 0;
799 }
800
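/* Write a fixed test pattern into each of the four PHY DSP channels
 * through the DSP address/data port, trigger the macro operation via
 * register 0x16, then read the pattern back.  Any mismatch or macro
 * timeout asks the caller (via *resetp) to reset the PHY again and
 * returns -EBUSY.
 */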
801 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
802 {
803         static const u32 test_pat[4][6] = {
804         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
805         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
806         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
807         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
808         };
809         int chan;
810
811         for (chan = 0; chan < 4; chan++) {
812                 int i;
813
814                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
815                              (chan * 0x2000) | 0x0200);
816                 tg3_writephy(tp, 0x16, 0x0002);
817
818                 for (i = 0; i < 6; i++)
819                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
820                                      test_pat[chan][i]);
821
822                 tg3_writephy(tp, 0x16, 0x0202);
823                 if (tg3_wait_macro_done(tp)) {
824                         *resetp = 1;
825                         return -EBUSY;
826                 }
827
828                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
829                              (chan * 0x2000) | 0x0200);
830                 tg3_writephy(tp, 0x16, 0x0082);
831                 if (tg3_wait_macro_done(tp)) {
832                         *resetp = 1;
833                         return -EBUSY;
834                 }
835
836                 tg3_writephy(tp, 0x16, 0x0802);
837                 if (tg3_wait_macro_done(tp)) {
838                         *resetp = 1;
839                         return -EBUSY;
840                 }
841
842                 for (i = 0; i < 6; i += 2) {
843                         u32 low, high;
844
845                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
846                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
847                             tg3_wait_macro_done(tp)) {
848                                 *resetp = 1;
849                                 return -EBUSY;
850                         }
851                         low &= 0x7fff;
852                         high &= 0x000f;
853                         if (low != test_pat[chan][i] ||
854                             high != test_pat[chan][i+1]) {
855                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
856                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
857                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
858
859                                 return -EBUSY;
860                         }
861                 }
862         }
863
864         return 0;
865 }
866
867 static int tg3_phy_reset_chanpat(struct tg3 *tp)
868 {
869         int chan;
870
871         for (chan = 0; chan < 4; chan++) {
872                 int i;
873
874                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
875                              (chan * 0x2000) | 0x0200);
876                 tg3_writephy(tp, 0x16, 0x0002);
877                 for (i = 0; i < 6; i++)
878                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
879                 tg3_writephy(tp, 0x16, 0x0202);
880                 if (tg3_wait_macro_done(tp))
881                         return -EBUSY;
882         }
883
884         return 0;
885 }
886
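/* Extended reset sequence for 5703/5704/5705 PHYs.  Up to ten times:
 * reset the PHY, disable its transmitter and interrupt, force
 * 1000 Mb/s full-duplex master mode, block PHY control access and run
 * the DSP test-pattern check above.  Once the pattern verifies, the
 * channel pattern is cleared and the original MII_TG3_CTRL and
 * extended-control settings are restored.
 */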
887 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
888 {
889         u32 reg32, phy9_orig;
890         int retries, do_phy_reset, err;
891
892         retries = 10;
893         do_phy_reset = 1;
894         do {
895                 if (do_phy_reset) {
896                         err = tg3_bmcr_reset(tp);
897                         if (err)
898                                 return err;
899                         do_phy_reset = 0;
900                 }
901
902                 /* Disable transmitter and interrupt.  */
903                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
904                         continue;
905
906                 reg32 |= 0x3000;
907                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
908
909                 /* Set full-duplex, 1000 mbps.  */
910                 tg3_writephy(tp, MII_BMCR,
911                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
912
913                 /* Set to master mode.  */
914                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
915                         continue;
916
917                 tg3_writephy(tp, MII_TG3_CTRL,
918                              (MII_TG3_CTRL_AS_MASTER |
919                               MII_TG3_CTRL_ENABLE_AS_MASTER));
920
921                 /* Enable SM_DSP_CLOCK and 6dB.  */
922                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
923
924                 /* Block the PHY control access.  */
925                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
926                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
927
928                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
929                 if (!err)
930                         break;
931         } while (--retries);
932
933         err = tg3_phy_reset_chanpat(tp);
934         if (err)
935                 return err;
936
937         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
938         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
939
940         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
941         tg3_writephy(tp, 0x16, 0x0000);
942
943         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
944             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
945                 /* Set Extended packet length bit for jumbo frames */
946                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
947         }
948         else {
949                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
950         }
951
952         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
953
954         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
955                 reg32 &= ~0x3000;
956                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
957         } else if (!err)
958                 err = -EBUSY;
959
960         return err;
961 }
962
963 /* This will reset the tigon3 PHY if there is no valid
964  * link unless the FORCE argument is non-zero.
965  */
966 static int tg3_phy_reset(struct tg3 *tp)
967 {
968         u32 phy_status;
969         int err;
970
971         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
972         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
973         if (err != 0)
974                 return -EBUSY;
975
976         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
977             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
978             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
979                 err = tg3_phy_reset_5703_4_5(tp);
980                 if (err)
981                         return err;
982                 goto out;
983         }
984
985         err = tg3_bmcr_reset(tp);
986         if (err)
987                 return err;
988
989 out:
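        /* Apply chip-specific PHY errata fixups (ADC, 5704 A0 and BER
         * bugs) by poking the DSP address/data registers, as selected
         * by the tg3_flags2 PHY bug flags.
         */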
990         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
991                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
992                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
993                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
994                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
995                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
996                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
997         }
998         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
999                 tg3_writephy(tp, 0x1c, 0x8d68);
1000                 tg3_writephy(tp, 0x1c, 0x8d68);
1001         }
1002         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1003                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1004                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1005                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1006                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1007                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1008                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1009                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1010                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1011         }
1012         /* Set Extended packet length bit (bit 14) on all chips that
1013          * support jumbo frames. */
1014         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1015                 /* Cannot do read-modify-write on 5401 */
1016                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1017         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1018                 u32 phy_reg;
1019
1020                 /* Set bit 14 with read-modify-write to preserve other bits */
1021                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1022                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1023                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1024         }
1025
1026         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1027          * jumbo frames transmission.
1028          */
1029         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1030                 u32 phy_reg;
1031
1032                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1033                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1034                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1035         }
1036
1037         tg3_phy_set_wirespeed(tp);
1038         return 0;
1039 }
1040
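/* Configure the GPIOs that select the auxiliary (Vaux) power source.
 * Nothing is done when the EEPROM write-protect flag is set.  On
 * 5704/5714 the two ports share these GPIOs, so the peer device's
 * WOL/ASF state is consulted first.  When either port needs WOL or ASF
 * the GPIOs are sequenced to keep the auxiliary supply available
 * (honoring the 5714 GPIO3 and 5753 no-GPIO2 quirks); otherwise GPIO1
 * is pulsed, since the auxiliary supply is not needed.
 */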
1041 static void tg3_frob_aux_power(struct tg3 *tp)
1042 {
1043         struct tg3 *tp_peer = tp;
1044
1045         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1046                 return;
1047
1048         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1049             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1050                 struct net_device *dev_peer;
1051
1052                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1053                 /* remove_one() may have been run on the peer. */
1054                 if (!dev_peer)
1055                         tp_peer = tp;
1056                 else
1057                         tp_peer = netdev_priv(dev_peer);
1058         }
1059
1060         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1061             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1062             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1063             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1064                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1065                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1066                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1067                                     (GRC_LCLCTRL_GPIO_OE0 |
1068                                      GRC_LCLCTRL_GPIO_OE1 |
1069                                      GRC_LCLCTRL_GPIO_OE2 |
1070                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1071                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1072                                     100);
1073                 } else {
1074                         u32 no_gpio2;
1075                         u32 grc_local_ctrl = 0;
1076
1077                         if (tp_peer != tp &&
1078                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1079                                 return;
1080
1081                         /* Workaround to prevent overdrawing Amps. */
1082                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1083                             ASIC_REV_5714) {
1084                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1085                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1086                                             grc_local_ctrl, 100);
1087                         }
1088
1089                         /* On 5753 and variants, GPIO2 cannot be used. */
1090                         no_gpio2 = tp->nic_sram_data_cfg &
1091                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1092
1093                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1094                                          GRC_LCLCTRL_GPIO_OE1 |
1095                                          GRC_LCLCTRL_GPIO_OE2 |
1096                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1097                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1098                         if (no_gpio2) {
1099                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1100                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1101                         }
1102                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1103                                                     grc_local_ctrl, 100);
1104
1105                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1106
1107                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1108                                                     grc_local_ctrl, 100);
1109
1110                         if (!no_gpio2) {
1111                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1112                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1113                                             grc_local_ctrl, 100);
1114                         }
1115                 }
1116         } else {
1117                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1118                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1119                         if (tp_peer != tp &&
1120                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1121                                 return;
1122
1123                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1124                                     (GRC_LCLCTRL_GPIO_OE1 |
1125                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1126
1127                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1128                                     GRC_LCLCTRL_GPIO_OE1, 100);
1129
1130                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1131                                     (GRC_LCLCTRL_GPIO_OE1 |
1132                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1133                 }
1134         }
1135 }
1136
1137 static int tg3_setup_phy(struct tg3 *, int);
1138
1139 #define RESET_KIND_SHUTDOWN     0
1140 #define RESET_KIND_INIT         1
1141 #define RESET_KIND_SUSPEND      2
1142
1143 static void tg3_write_sig_post_reset(struct tg3 *, int);
1144 static int tg3_halt_cpu(struct tg3 *, u32);
1145 static int tg3_nvram_lock(struct tg3 *);
1146 static void tg3_nvram_unlock(struct tg3 *);
1147
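/* Move the chip into the requested PCI power state.  D0 simply
 * restores full power (switching out of Vaux on non-LOM boards) and
 * returns.  For D1-D3hot the driver masks interrupts, drops a copper
 * link to 10/half autoneg, posts the WOL signature into NIC SRAM,
 * optionally arms magic-packet reception, gates the RX/TX clocks as
 * far as the chip allows, powers the PHY down when neither WOL nor ASF
 * needs it, sequences the auxiliary-power GPIOs and finally writes the
 * new state into the PM control register.
 */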
1148 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1149 {
1150         u32 misc_host_ctrl;
1151         u16 power_control, power_caps;
1152         int pm = tp->pm_cap;
1153
1154         /* Make sure register accesses (indirect or otherwise)
1155          * will function correctly.
1156          */
1157         pci_write_config_dword(tp->pdev,
1158                                TG3PCI_MISC_HOST_CTRL,
1159                                tp->misc_host_ctrl);
1160
1161         pci_read_config_word(tp->pdev,
1162                              pm + PCI_PM_CTRL,
1163                              &power_control);
1164         power_control |= PCI_PM_CTRL_PME_STATUS;
1165         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1166         switch (state) {
1167         case PCI_D0:
1168                 power_control |= 0;
1169                 pci_write_config_word(tp->pdev,
1170                                       pm + PCI_PM_CTRL,
1171                                       power_control);
1172                 udelay(100);    /* Delay after power state change */
1173
1174                 /* Switch out of Vaux if it is not a LOM */
1175                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1176                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1177
1178                 return 0;
1179
1180         case PCI_D1:
1181                 power_control |= 1;
1182                 break;
1183
1184         case PCI_D2:
1185                 power_control |= 2;
1186                 break;
1187
1188         case PCI_D3hot:
1189                 power_control |= 3;
1190                 break;
1191
1192         default:
1193                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1194                        "requested.\n",
1195                        tp->dev->name, state);
1196                 return -EINVAL;
1197         };
1198
1199         power_control |= PCI_PM_CTRL_PME_ENABLE;
1200
1201         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1202         tw32(TG3PCI_MISC_HOST_CTRL,
1203              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1204
1205         if (tp->link_config.phy_is_low_power == 0) {
1206                 tp->link_config.phy_is_low_power = 1;
1207                 tp->link_config.orig_speed = tp->link_config.speed;
1208                 tp->link_config.orig_duplex = tp->link_config.duplex;
1209                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1210         }
1211
1212         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1213                 tp->link_config.speed = SPEED_10;
1214                 tp->link_config.duplex = DUPLEX_HALF;
1215                 tp->link_config.autoneg = AUTONEG_ENABLE;
1216                 tg3_setup_phy(tp, 0);
1217         }
1218
1219         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1220                 int i;
1221                 u32 val;
1222
1223                 for (i = 0; i < 200; i++) {
1224                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1225                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1226                                 break;
1227                         msleep(1);
1228                 }
1229         }
1230         tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1231                                              WOL_DRV_STATE_SHUTDOWN |
1232                                              WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1233
1234         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1235
1236         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1237                 u32 mac_mode;
1238
1239                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1240                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1241                         udelay(40);
1242
1243                         mac_mode = MAC_MODE_PORT_MODE_MII;
1244
1245                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1246                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1247                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1248                 } else {
1249                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1250                 }
1251
1252                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1253                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1254
1255                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1256                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1257                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1258
1259                 tw32_f(MAC_MODE, mac_mode);
1260                 udelay(100);
1261
1262                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1263                 udelay(10);
1264         }
1265
1266         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1267             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1268              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1269                 u32 base_val;
1270
1271                 base_val = tp->pci_clock_ctrl;
1272                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1273                              CLOCK_CTRL_TXCLK_DISABLE);
1274
1275                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1276                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1277         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1278                 /* do nothing */
1279         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1280                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1281                 u32 newbits1, newbits2;
1282
1283                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1284                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1285                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1286                                     CLOCK_CTRL_TXCLK_DISABLE |
1287                                     CLOCK_CTRL_ALTCLK);
1288                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1289                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1290                         newbits1 = CLOCK_CTRL_625_CORE;
1291                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1292                 } else {
1293                         newbits1 = CLOCK_CTRL_ALTCLK;
1294                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1295                 }
1296
1297                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1298                             40);
1299
1300                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1301                             40);
1302
1303                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1304                         u32 newbits3;
1305
1306                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1307                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1308                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1309                                             CLOCK_CTRL_TXCLK_DISABLE |
1310                                             CLOCK_CTRL_44MHZ_CORE);
1311                         } else {
1312                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1313                         }
1314
1315                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1316                                     tp->pci_clock_ctrl | newbits3, 40);
1317                 }
1318         }
1319
1320         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1321             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1322                 /* Turn off the PHY */
1323                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1324                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1325                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1326                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1327                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
1328                                 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1329                 }
1330         }
1331
1332         tg3_frob_aux_power(tp);
1333
1334         /* Workaround for unstable PLL clock */
1335         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1336             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1337                 u32 val = tr32(0x7d00);
1338
1339                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1340                 tw32(0x7d00, val);
1341                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1342                         int err;
1343
1344                         err = tg3_nvram_lock(tp);
1345                         tg3_halt_cpu(tp, RX_CPU_BASE);
1346                         if (!err)
1347                                 tg3_nvram_unlock(tp);
1348                 }
1349         }
1350
1351         /* Finally, set the new power state. */
1352         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1353         udelay(100);    /* Delay after power state change */
1354
1355         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1356
1357         return 0;
1358 }
1359
1360 static void tg3_link_report(struct tg3 *tp)
1361 {
1362         if (!netif_carrier_ok(tp->dev)) {
1363                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1364         } else {
1365                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1366                        tp->dev->name,
1367                        (tp->link_config.active_speed == SPEED_1000 ?
1368                         1000 :
1369                         (tp->link_config.active_speed == SPEED_100 ?
1370                          100 : 10)),
1371                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1372                         "full" : "half"));
1373
1374                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1375                        "%s for RX.\n",
1376                        tp->dev->name,
1377                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1378                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1379         }
1380 }
1381
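/* Resolve TX/RX pause settings from the local and link-partner
 * advertisements.  For MII SERDES the 1000BASE-X pause bits are first
 * translated into their 1000BASE-T equivalents, then the usual
 * symmetric/asymmetric pause resolution is applied.  When pause
 * autonegotiation is disabled the previously configured flags are
 * kept.  MAC_RX_MODE/MAC_TX_MODE are only rewritten when the
 * flow-control enables actually changed.
 */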
1382 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1383 {
1384         u32 new_tg3_flags = 0;
1385         u32 old_rx_mode = tp->rx_mode;
1386         u32 old_tx_mode = tp->tx_mode;
1387
1388         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1389
1390                 /* Convert 1000BaseX flow control bits to 1000BaseT
1391                  * bits before resolving flow control.
1392                  */
1393                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1394                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1395                                        ADVERTISE_PAUSE_ASYM);
1396                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1397
1398                         if (local_adv & ADVERTISE_1000XPAUSE)
1399                                 local_adv |= ADVERTISE_PAUSE_CAP;
1400                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1401                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1402                         if (remote_adv & LPA_1000XPAUSE)
1403                                 remote_adv |= LPA_PAUSE_CAP;
1404                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1405                                 remote_adv |= LPA_PAUSE_ASYM;
1406                 }
1407
1408                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1409                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1410                                 if (remote_adv & LPA_PAUSE_CAP)
1411                                         new_tg3_flags |=
1412                                                 (TG3_FLAG_RX_PAUSE |
1413                                                 TG3_FLAG_TX_PAUSE);
1414                                 else if (remote_adv & LPA_PAUSE_ASYM)
1415                                         new_tg3_flags |=
1416                                                 (TG3_FLAG_RX_PAUSE);
1417                         } else {
1418                                 if (remote_adv & LPA_PAUSE_CAP)
1419                                         new_tg3_flags |=
1420                                                 (TG3_FLAG_RX_PAUSE |
1421                                                 TG3_FLAG_TX_PAUSE);
1422                         }
1423                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1424                         if ((remote_adv & LPA_PAUSE_CAP) &&
1425                             (remote_adv & LPA_PAUSE_ASYM))
1426                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1427                 }
1428
1429                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1430                 tp->tg3_flags |= new_tg3_flags;
1431         } else {
1432                 new_tg3_flags = tp->tg3_flags;
1433         }
1434
1435         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1436                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1437         else
1438                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1439
1440         if (old_rx_mode != tp->rx_mode) {
1441                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1442         }
1443
1444         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1445                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1446         else
1447                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1448
1449         if (old_tx_mode != tp->tx_mode) {
1450                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1451         }
1452 }
1453
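/* Decode the speed/duplex that the PHY resolved, as reported in the
 * speed field of the Broadcom auxiliary status summary register.
 */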
1454 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1455 {
1456         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1457         case MII_TG3_AUX_STAT_10HALF:
1458                 *speed = SPEED_10;
1459                 *duplex = DUPLEX_HALF;
1460                 break;
1461
1462         case MII_TG3_AUX_STAT_10FULL:
1463                 *speed = SPEED_10;
1464                 *duplex = DUPLEX_FULL;
1465                 break;
1466
1467         case MII_TG3_AUX_STAT_100HALF:
1468                 *speed = SPEED_100;
1469                 *duplex = DUPLEX_HALF;
1470                 break;
1471
1472         case MII_TG3_AUX_STAT_100FULL:
1473                 *speed = SPEED_100;
1474                 *duplex = DUPLEX_FULL;
1475                 break;
1476
1477         case MII_TG3_AUX_STAT_1000HALF:
1478                 *speed = SPEED_1000;
1479                 *duplex = DUPLEX_HALF;
1480                 break;
1481
1482         case MII_TG3_AUX_STAT_1000FULL:
1483                 *speed = SPEED_1000;
1484                 *duplex = DUPLEX_FULL;
1485                 break;
1486
1487         default:
1488                 *speed = SPEED_INVALID;
1489                 *duplex = DUPLEX_INVALID;
1490                 break;
1491         }
1492 }
1493
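/* Program the copper PHY advertisement registers from tp->link_config
 * and kick autonegotiation.  When a fixed speed/duplex is forced with
 * autoneg disabled, the PHY is briefly put into loopback so the link
 * drops cleanly before the final BMCR value is written.
 */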
1494 static void tg3_phy_copper_begin(struct tg3 *tp)
1495 {
1496         u32 new_adv;
1497         int i;
1498
1499         if (tp->link_config.phy_is_low_power) {
1500                 /* Entering low power mode.  Disable gigabit and
1501                  * 100baseT advertisements.
1502                  */
1503                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1504
1505                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1506                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1507                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1508                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1509
1510                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1511         } else if (tp->link_config.speed == SPEED_INVALID) {
1512                 tp->link_config.advertising =
1513                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1514                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1515                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1516                          ADVERTISED_Autoneg | ADVERTISED_MII);
1517
1518                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1519                         tp->link_config.advertising &=
1520                                 ~(ADVERTISED_1000baseT_Half |
1521                                   ADVERTISED_1000baseT_Full);
1522
1523                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1524                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1525                         new_adv |= ADVERTISE_10HALF;
1526                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1527                         new_adv |= ADVERTISE_10FULL;
1528                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1529                         new_adv |= ADVERTISE_100HALF;
1530                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1531                         new_adv |= ADVERTISE_100FULL;
1532                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1533
1534                 if (tp->link_config.advertising &
1535                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1536                         new_adv = 0;
1537                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1538                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1539                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1540                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1541                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1542                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1543                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1544                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1545                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1546                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1547                 } else {
1548                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1549                 }
1550         } else {
1551                 /* Asking for a specific link mode. */
1552                 if (tp->link_config.speed == SPEED_1000) {
1553                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1554                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1555
1556                         if (tp->link_config.duplex == DUPLEX_FULL)
1557                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1558                         else
1559                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1560                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1561                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1562                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1563                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1564                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1565                 } else {
1566                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1567
1568                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1569                         if (tp->link_config.speed == SPEED_100) {
1570                                 if (tp->link_config.duplex == DUPLEX_FULL)
1571                                         new_adv |= ADVERTISE_100FULL;
1572                                 else
1573                                         new_adv |= ADVERTISE_100HALF;
1574                         } else {
1575                                 if (tp->link_config.duplex == DUPLEX_FULL)
1576                                         new_adv |= ADVERTISE_10FULL;
1577                                 else
1578                                         new_adv |= ADVERTISE_10HALF;
1579                         }
1580                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1581                 }
1582         }
1583
1584         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1585             tp->link_config.speed != SPEED_INVALID) {
1586                 u32 bmcr, orig_bmcr;
1587
1588                 tp->link_config.active_speed = tp->link_config.speed;
1589                 tp->link_config.active_duplex = tp->link_config.duplex;
1590
1591                 bmcr = 0;
1592                 switch (tp->link_config.speed) {
1593                 default:
1594                 case SPEED_10:
1595                         break;
1596
1597                 case SPEED_100:
1598                         bmcr |= BMCR_SPEED100;
1599                         break;
1600
1601                 case SPEED_1000:
1602                         bmcr |= TG3_BMCR_SPEED1000;
1603                         break;
1604                 }
1605
1606                 if (tp->link_config.duplex == DUPLEX_FULL)
1607                         bmcr |= BMCR_FULLDPLX;
1608
1609                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1610                     (bmcr != orig_bmcr)) {
1611                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1612                         for (i = 0; i < 1500; i++) {
1613                                 u32 tmp;
1614
1615                                 udelay(10);
1616                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1617                                     tg3_readphy(tp, MII_BMSR, &tmp))
1618                                         continue;
1619                                 if (!(tmp & BMSR_LSTATUS)) {
1620                                         udelay(40);
1621                                         break;
1622                                 }
1623                         }
1624                         tg3_writephy(tp, MII_BMCR, bmcr);
1625                         udelay(40);
1626                 }
1627         } else {
1628                 tg3_writephy(tp, MII_BMCR,
1629                              BMCR_ANENABLE | BMCR_ANRESTART);
1630         }
1631 }
1632
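/* BCM5401 DSP fixups.  The writes below use the PHY's indirect DSP
 * access scheme: select the target DSP register via
 * MII_TG3_DSP_ADDRESS, then write the value via MII_TG3_DSP_RW_PORT.
 * The specific address/value pairs appear to be vendor-supplied
 * workaround magic and are not otherwise documented here.
 */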
1633 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1634 {
1635         int err;
1636
1637         /* Turn off tap power management and set the
1638          * extended packet length bit. */
1639         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1640
1641         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1642         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1643
1644         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1645         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1646
1647         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1648         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1649
1650         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1651         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1652
1653         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1654         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1655
1656         udelay(40);
1657
1658         return err;
1659 }
1660
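/* Returns nonzero when the PHY is advertising the full 10/100 ability
 * set (plus both gigabit modes unless the board is 10/100 only).  Used
 * below to decide whether autonegotiation must be restarted, e.g.
 * after coming back from low power mode.
 */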
1661 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1662 {
1663         u32 adv_reg, all_mask;
1664
1665         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1666                 return 0;
1667
1668         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1669                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1670         if ((adv_reg & all_mask) != all_mask)
1671                 return 0;
1672         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1673                 u32 tg3_ctrl;
1674
1675                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1676                         return 0;
1677
1678                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1679                             MII_TG3_CTRL_ADV_1000_FULL);
1680                 if ((tg3_ctrl & all_mask) != all_mask)
1681                         return 0;
1682         }
1683         return 1;
1684 }
1685
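/* Copper link setup: clear stale MAC/PHY interrupt state, apply
 * chip-specific PHY workarounds, poll BMSR for link, resolve
 * speed/duplex and flow control, then program MAC_MODE and the
 * link-change event source to match the result.
 */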
1686 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1687 {
1688         int current_link_up;
1689         u32 bmsr, dummy;
1690         u16 current_speed;
1691         u8 current_duplex;
1692         int i, err;
1693
1694         tw32(MAC_EVENT, 0);
1695
1696         tw32_f(MAC_STATUS,
1697              (MAC_STATUS_SYNC_CHANGED |
1698               MAC_STATUS_CFG_CHANGED |
1699               MAC_STATUS_MI_COMPLETION |
1700               MAC_STATUS_LNKSTATE_CHANGED));
1701         udelay(40);
1702
1703         tp->mi_mode = MAC_MI_MODE_BASE;
1704         tw32_f(MAC_MI_MODE, tp->mi_mode);
1705         udelay(80);
1706
1707         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1708
1709         /* Some third-party PHYs need to be reset on link going
1710          * down.
1711          */
1712         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1713              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1714              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1715             netif_carrier_ok(tp->dev)) {
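                /* BMSR latches link-down, so read it twice: the first
                 * read clears any latched link-fail state, the second
                 * returns the current link status.
                 */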
1716                 tg3_readphy(tp, MII_BMSR, &bmsr);
1717                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1718                     !(bmsr & BMSR_LSTATUS))
1719                         force_reset = 1;
1720         }
1721         if (force_reset)
1722                 tg3_phy_reset(tp);
1723
1724         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1725                 tg3_readphy(tp, MII_BMSR, &bmsr);
1726                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1727                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1728                         bmsr = 0;
1729
1730                 if (!(bmsr & BMSR_LSTATUS)) {
1731                         err = tg3_init_5401phy_dsp(tp);
1732                         if (err)
1733                                 return err;
1734
1735                         tg3_readphy(tp, MII_BMSR, &bmsr);
1736                         for (i = 0; i < 1000; i++) {
1737                                 udelay(10);
1738                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1739                                     (bmsr & BMSR_LSTATUS)) {
1740                                         udelay(40);
1741                                         break;
1742                                 }
1743                         }
1744
1745                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1746                             !(bmsr & BMSR_LSTATUS) &&
1747                             tp->link_config.active_speed == SPEED_1000) {
1748                                 err = tg3_phy_reset(tp);
1749                                 if (!err)
1750                                         err = tg3_init_5401phy_dsp(tp);
1751                                 if (err)
1752                                         return err;
1753                         }
1754                 }
1755         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1756                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1757                 /* 5701 {A0,B0} CRC bug workaround */
1758                 tg3_writephy(tp, 0x15, 0x0a75);
1759                 tg3_writephy(tp, 0x1c, 0x8c68);
1760                 tg3_writephy(tp, 0x1c, 0x8d68);
1761                 tg3_writephy(tp, 0x1c, 0x8c68);
1762         }
1763
1764         /* Clear pending interrupts... */
1765         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1766         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1767
1768         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1769                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1770         else
1771                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1772
1773         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1774             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1775                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1776                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1777                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1778                 else
1779                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1780         }
1781
1782         current_link_up = 0;
1783         current_speed = SPEED_INVALID;
1784         current_duplex = DUPLEX_INVALID;
1785
1786         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1787                 u32 val;
1788
1789                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1790                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1791                 if (!(val & (1 << 10))) {
1792                         val |= (1 << 10);
1793                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1794                         goto relink;
1795                 }
1796         }
1797
1798         bmsr = 0;
1799         for (i = 0; i < 100; i++) {
1800                 tg3_readphy(tp, MII_BMSR, &bmsr);
1801                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1802                     (bmsr & BMSR_LSTATUS))
1803                         break;
1804                 udelay(40);
1805         }
1806
1807         if (bmsr & BMSR_LSTATUS) {
1808                 u32 aux_stat, bmcr;
1809
1810                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1811                 for (i = 0; i < 2000; i++) {
1812                         udelay(10);
1813                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1814                             aux_stat)
1815                                 break;
1816                 }
1817
1818                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1819                                              &current_speed,
1820                                              &current_duplex);
1821
1822                 bmcr = 0;
1823                 for (i = 0; i < 200; i++) {
1824                         tg3_readphy(tp, MII_BMCR, &bmcr);
1825                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1826                                 continue;
1827                         if (bmcr && bmcr != 0x7fff)
1828                                 break;
1829                         udelay(10);
1830                 }
1831
1832                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1833                         if (bmcr & BMCR_ANENABLE) {
1834                                 current_link_up = 1;
1835
1836                                 /* Force autoneg restart if we are exiting
1837                                  * low power mode.
1838                                  */
1839                                 if (!tg3_copper_is_advertising_all(tp))
1840                                         current_link_up = 0;
1841                         } else {
1842                                 current_link_up = 0;
1843                         }
1844                 } else {
1845                         if (!(bmcr & BMCR_ANENABLE) &&
1846                             tp->link_config.speed == current_speed &&
1847                             tp->link_config.duplex == current_duplex) {
1848                                 current_link_up = 1;
1849                         } else {
1850                                 current_link_up = 0;
1851                         }
1852                 }
1853
1854                 tp->link_config.active_speed = current_speed;
1855                 tp->link_config.active_duplex = current_duplex;
1856         }
1857
1858         if (current_link_up == 1 &&
1859             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1860             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1861                 u32 local_adv, remote_adv;
1862
1863                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1864                         local_adv = 0;
1865                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1866
1867                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1868                         remote_adv = 0;
1869
1870                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1871
1872                 /* If we are not advertising full pause capability,
1873                  * something is wrong.  Bring the link down and reconfigure.
1874                  */
1875                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1876                         current_link_up = 0;
1877                 } else {
1878                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1879                 }
1880         }
1881 relink:
1882         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1883                 u32 tmp;
1884
1885                 tg3_phy_copper_begin(tp);
1886
1887                 tg3_readphy(tp, MII_BMSR, &tmp);
1888                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1889                     (tmp & BMSR_LSTATUS))
1890                         current_link_up = 1;
1891         }
1892
1893         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1894         if (current_link_up == 1) {
1895                 if (tp->link_config.active_speed == SPEED_100 ||
1896                     tp->link_config.active_speed == SPEED_10)
1897                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1898                 else
1899                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1900         } else
1901                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1902
1903         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1904         if (tp->link_config.active_duplex == DUPLEX_HALF)
1905                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1906
1907         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1908         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1909                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1910                     (current_link_up == 1 &&
1911                      tp->link_config.active_speed == SPEED_10))
1912                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1913         } else {
1914                 if (current_link_up == 1)
1915                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1916         }
1917
1918         /* ??? Without this setting Netgear GA302T PHY does not
1919          * ??? send/receive packets...
1920          */
1921         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1922             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1923                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1924                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1925                 udelay(80);
1926         }
1927
1928         tw32_f(MAC_MODE, tp->mac_mode);
1929         udelay(40);
1930
1931         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1932                 /* Polled via timer. */
1933                 tw32_f(MAC_EVENT, 0);
1934         } else {
1935                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1936         }
1937         udelay(40);
1938
1939         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1940             current_link_up == 1 &&
1941             tp->link_config.active_speed == SPEED_1000 &&
1942             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1943              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1944                 udelay(120);
1945                 tw32_f(MAC_STATUS,
1946                      (MAC_STATUS_SYNC_CHANGED |
1947                       MAC_STATUS_CFG_CHANGED));
1948                 udelay(40);
1949                 tg3_write_mem(tp,
1950                               NIC_SRAM_FIRMWARE_MBOX,
1951                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1952         }
1953
1954         if (current_link_up != netif_carrier_ok(tp->dev)) {
1955                 if (current_link_up)
1956                         netif_carrier_on(tp->dev);
1957                 else
1958                         netif_carrier_off(tp->dev);
1959                 tg3_link_report(tp);
1960         }
1961
1962         return 0;
1963 }
1964
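/* Software 1000BaseX autonegotiation (IEEE 802.3 Clause 37) for
 * fiber/TBI ports where the MAC's hardware autoneg block is not used.
 * The ANEG_STATE_* values and MR_* flags below mirror the arbitration
 * state diagram and management variables from the standard.
 */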
1965 struct tg3_fiber_aneginfo {
1966         int state;
1967 #define ANEG_STATE_UNKNOWN              0
1968 #define ANEG_STATE_AN_ENABLE            1
1969 #define ANEG_STATE_RESTART_INIT         2
1970 #define ANEG_STATE_RESTART              3
1971 #define ANEG_STATE_DISABLE_LINK_OK      4
1972 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1973 #define ANEG_STATE_ABILITY_DETECT       6
1974 #define ANEG_STATE_ACK_DETECT_INIT      7
1975 #define ANEG_STATE_ACK_DETECT           8
1976 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1977 #define ANEG_STATE_COMPLETE_ACK         10
1978 #define ANEG_STATE_IDLE_DETECT_INIT     11
1979 #define ANEG_STATE_IDLE_DETECT          12
1980 #define ANEG_STATE_LINK_OK              13
1981 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1982 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1983
1984         u32 flags;
1985 #define MR_AN_ENABLE            0x00000001
1986 #define MR_RESTART_AN           0x00000002
1987 #define MR_AN_COMPLETE          0x00000004
1988 #define MR_PAGE_RX              0x00000008
1989 #define MR_NP_LOADED            0x00000010
1990 #define MR_TOGGLE_TX            0x00000020
1991 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1992 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1993 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1994 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1995 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1996 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1997 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1998 #define MR_TOGGLE_RX            0x00002000
1999 #define MR_NP_RX                0x00004000
2000
2001 #define MR_LINK_OK              0x80000000
2002
2003         unsigned long link_time, cur_time;
2004
2005         u32 ability_match_cfg;
2006         int ability_match_count;
2007
2008         char ability_match, idle_match, ack_match;
2009
2010         u32 txconfig, rxconfig;
2011 #define ANEG_CFG_NP             0x00000080
2012 #define ANEG_CFG_ACK            0x00000040
2013 #define ANEG_CFG_RF2            0x00000020
2014 #define ANEG_CFG_RF1            0x00000010
2015 #define ANEG_CFG_PS2            0x00000001
2016 #define ANEG_CFG_PS1            0x00008000
2017 #define ANEG_CFG_HD             0x00004000
2018 #define ANEG_CFG_FD             0x00002000
2019 #define ANEG_CFG_INVAL          0x00001f06
2020
2021 };
2022 #define ANEG_OK         0
2023 #define ANEG_DONE       1
2024 #define ANEG_TIMER_ENAB 2
2025 #define ANEG_FAILED     -1
2026
2027 #define ANEG_STATE_SETTLE_TIME  10000
2028
2029 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2030                                    struct tg3_fiber_aneginfo *ap)
2031 {
2032         unsigned long delta;
2033         u32 rx_cfg_reg;
2034         int ret;
2035
2036         if (ap->state == ANEG_STATE_UNKNOWN) {
2037                 ap->rxconfig = 0;
2038                 ap->link_time = 0;
2039                 ap->cur_time = 0;
2040                 ap->ability_match_cfg = 0;
2041                 ap->ability_match_count = 0;
2042                 ap->ability_match = 0;
2043                 ap->idle_match = 0;
2044                 ap->ack_match = 0;
2045         }
2046         ap->cur_time++;
2047
2048         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2049                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2050
2051                 if (rx_cfg_reg != ap->ability_match_cfg) {
2052                         ap->ability_match_cfg = rx_cfg_reg;
2053                         ap->ability_match = 0;
2054                         ap->ability_match_count = 0;
2055                 } else {
2056                         if (++ap->ability_match_count > 1) {
2057                                 ap->ability_match = 1;
2058                                 ap->ability_match_cfg = rx_cfg_reg;
2059                         }
2060                 }
2061                 if (rx_cfg_reg & ANEG_CFG_ACK)
2062                         ap->ack_match = 1;
2063                 else
2064                         ap->ack_match = 0;
2065
2066                 ap->idle_match = 0;
2067         } else {
2068                 ap->idle_match = 1;
2069                 ap->ability_match_cfg = 0;
2070                 ap->ability_match_count = 0;
2071                 ap->ability_match = 0;
2072                 ap->ack_match = 0;
2073
2074                 rx_cfg_reg = 0;
2075         }
2076
2077         ap->rxconfig = rx_cfg_reg;
2078         ret = ANEG_OK;
2079
2080         switch(ap->state) {
2081         case ANEG_STATE_UNKNOWN:
2082                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2083                         ap->state = ANEG_STATE_AN_ENABLE;
2084
2085                 /* fallthru */
2086         case ANEG_STATE_AN_ENABLE:
2087                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2088                 if (ap->flags & MR_AN_ENABLE) {
2089                         ap->link_time = 0;
2090                         ap->cur_time = 0;
2091                         ap->ability_match_cfg = 0;
2092                         ap->ability_match_count = 0;
2093                         ap->ability_match = 0;
2094                         ap->idle_match = 0;
2095                         ap->ack_match = 0;
2096
2097                         ap->state = ANEG_STATE_RESTART_INIT;
2098                 } else {
2099                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2100                 }
2101                 break;
2102
2103         case ANEG_STATE_RESTART_INIT:
2104                 ap->link_time = ap->cur_time;
2105                 ap->flags &= ~(MR_NP_LOADED);
2106                 ap->txconfig = 0;
2107                 tw32(MAC_TX_AUTO_NEG, 0);
2108                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2109                 tw32_f(MAC_MODE, tp->mac_mode);
2110                 udelay(40);
2111
2112                 ret = ANEG_TIMER_ENAB;
2113                 ap->state = ANEG_STATE_RESTART;
2114
2115                 /* fallthru */
2116         case ANEG_STATE_RESTART:
2117                 delta = ap->cur_time - ap->link_time;
2118                 if (delta > ANEG_STATE_SETTLE_TIME) {
2119                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2120                 } else {
2121                         ret = ANEG_TIMER_ENAB;
2122                 }
2123                 break;
2124
2125         case ANEG_STATE_DISABLE_LINK_OK:
2126                 ret = ANEG_DONE;
2127                 break;
2128
2129         case ANEG_STATE_ABILITY_DETECT_INIT:
2130                 ap->flags &= ~(MR_TOGGLE_TX);
2131                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2132                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2133                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2134                 tw32_f(MAC_MODE, tp->mac_mode);
2135                 udelay(40);
2136
2137                 ap->state = ANEG_STATE_ABILITY_DETECT;
2138                 break;
2139
2140         case ANEG_STATE_ABILITY_DETECT:
2141                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2142                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2143                 }
2144                 break;
2145
2146         case ANEG_STATE_ACK_DETECT_INIT:
2147                 ap->txconfig |= ANEG_CFG_ACK;
2148                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2149                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2150                 tw32_f(MAC_MODE, tp->mac_mode);
2151                 udelay(40);
2152
2153                 ap->state = ANEG_STATE_ACK_DETECT;
2154
2155                 /* fallthru */
2156         case ANEG_STATE_ACK_DETECT:
2157                 if (ap->ack_match != 0) {
2158                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2159                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2160                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2161                         } else {
2162                                 ap->state = ANEG_STATE_AN_ENABLE;
2163                         }
2164                 } else if (ap->ability_match != 0 &&
2165                            ap->rxconfig == 0) {
2166                         ap->state = ANEG_STATE_AN_ENABLE;
2167                 }
2168                 break;
2169
2170         case ANEG_STATE_COMPLETE_ACK_INIT:
2171                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2172                         ret = ANEG_FAILED;
2173                         break;
2174                 }
2175                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2176                                MR_LP_ADV_HALF_DUPLEX |
2177                                MR_LP_ADV_SYM_PAUSE |
2178                                MR_LP_ADV_ASYM_PAUSE |
2179                                MR_LP_ADV_REMOTE_FAULT1 |
2180                                MR_LP_ADV_REMOTE_FAULT2 |
2181                                MR_LP_ADV_NEXT_PAGE |
2182                                MR_TOGGLE_RX |
2183                                MR_NP_RX);
2184                 if (ap->rxconfig & ANEG_CFG_FD)
2185                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2186                 if (ap->rxconfig & ANEG_CFG_HD)
2187                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2188                 if (ap->rxconfig & ANEG_CFG_PS1)
2189                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2190                 if (ap->rxconfig & ANEG_CFG_PS2)
2191                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2192                 if (ap->rxconfig & ANEG_CFG_RF1)
2193                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2194                 if (ap->rxconfig & ANEG_CFG_RF2)
2195                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2196                 if (ap->rxconfig & ANEG_CFG_NP)
2197                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2198
2199                 ap->link_time = ap->cur_time;
2200
2201                 ap->flags ^= (MR_TOGGLE_TX);
2202                 if (ap->rxconfig & 0x0008)
2203                         ap->flags |= MR_TOGGLE_RX;
2204                 if (ap->rxconfig & ANEG_CFG_NP)
2205                         ap->flags |= MR_NP_RX;
2206                 ap->flags |= MR_PAGE_RX;
2207
2208                 ap->state = ANEG_STATE_COMPLETE_ACK;
2209                 ret = ANEG_TIMER_ENAB;
2210                 break;
2211
2212         case ANEG_STATE_COMPLETE_ACK:
2213                 if (ap->ability_match != 0 &&
2214                     ap->rxconfig == 0) {
2215                         ap->state = ANEG_STATE_AN_ENABLE;
2216                         break;
2217                 }
2218                 delta = ap->cur_time - ap->link_time;
2219                 if (delta > ANEG_STATE_SETTLE_TIME) {
2220                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2221                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2222                         } else {
2223                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2224                                     !(ap->flags & MR_NP_RX)) {
2225                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2226                                 } else {
2227                                         ret = ANEG_FAILED;
2228                                 }
2229                         }
2230                 }
2231                 break;
2232
2233         case ANEG_STATE_IDLE_DETECT_INIT:
2234                 ap->link_time = ap->cur_time;
2235                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2236                 tw32_f(MAC_MODE, tp->mac_mode);
2237                 udelay(40);
2238
2239                 ap->state = ANEG_STATE_IDLE_DETECT;
2240                 ret = ANEG_TIMER_ENAB;
2241                 break;
2242
2243         case ANEG_STATE_IDLE_DETECT:
2244                 if (ap->ability_match != 0 &&
2245                     ap->rxconfig == 0) {
2246                         ap->state = ANEG_STATE_AN_ENABLE;
2247                         break;
2248                 }
2249                 delta = ap->cur_time - ap->link_time;
2250                 if (delta > ANEG_STATE_SETTLE_TIME) {
2251                         /* XXX another gem from the Broadcom driver :( */
2252                         ap->state = ANEG_STATE_LINK_OK;
2253                 }
2254                 break;
2255
2256         case ANEG_STATE_LINK_OK:
2257                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2258                 ret = ANEG_DONE;
2259                 break;
2260
2261         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2262                 /* ??? unimplemented */
2263                 break;
2264
2265         case ANEG_STATE_NEXT_PAGE_WAIT:
2266                 /* ??? unimplemented */
2267                 break;
2268
2269         default:
2270                 ret = ANEG_FAILED;
2271                 break;
2272         }
2273
2274         return ret;
2275 }
2276
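/* Run the Clause 37 software state machine to completion.  The loop
 * below ticks the state machine roughly once per microsecond, so the
 * ~195000-tick budget gives the link partner on the order of 200ms to
 * negotiate before giving up.
 */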
2277 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2278 {
2279         int res = 0;
2280         struct tg3_fiber_aneginfo aninfo;
2281         int status = ANEG_FAILED;
2282         unsigned int tick;
2283         u32 tmp;
2284
2285         tw32_f(MAC_TX_AUTO_NEG, 0);
2286
2287         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2288         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2289         udelay(40);
2290
2291         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2292         udelay(40);
2293
2294         memset(&aninfo, 0, sizeof(aninfo));
2295         aninfo.flags |= MR_AN_ENABLE;
2296         aninfo.state = ANEG_STATE_UNKNOWN;
2297         aninfo.cur_time = 0;
2298         tick = 0;
2299         while (++tick < 195000) {
2300                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2301                 if (status == ANEG_DONE || status == ANEG_FAILED)
2302                         break;
2303
2304                 udelay(1);
2305         }
2306
2307         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2308         tw32_f(MAC_MODE, tp->mac_mode);
2309         udelay(40);
2310
2311         *flags = aninfo.flags;
2312
2313         if (status == ANEG_DONE &&
2314             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2315                              MR_LP_ADV_FULL_DUPLEX)))
2316                 res = 1;
2317
2318         return res;
2319 }
2320
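/* Basic bring-up of the external BCM8002 SERDES PHY.  The register
 * numbers and values used here are vendor-specific; the per-write
 * comments describe their apparent purpose.
 */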
2321 static void tg3_init_bcm8002(struct tg3 *tp)
2322 {
2323         u32 mac_status = tr32(MAC_STATUS);
2324         int i;
2325
2326         /* Reset when initializing for the first time or when we have a link. */
2327         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2328             !(mac_status & MAC_STATUS_PCS_SYNCED))
2329                 return;
2330
2331         /* Set PLL lock range. */
2332         tg3_writephy(tp, 0x16, 0x8007);
2333
2334         /* SW reset */
2335         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2336
2337         /* Wait for reset to complete. */
2338         /* XXX schedule_timeout() ... */
2339         for (i = 0; i < 500; i++)
2340                 udelay(10);
2341
2342         /* Config mode; select PMA/Ch 1 regs. */
2343         tg3_writephy(tp, 0x10, 0x8411);
2344
2345         /* Enable auto-lock and comdet, select txclk for tx. */
2346         tg3_writephy(tp, 0x11, 0x0a10);
2347
2348         tg3_writephy(tp, 0x18, 0x00a0);
2349         tg3_writephy(tp, 0x16, 0x41ff);
2350
2351         /* Assert and deassert POR. */
2352         tg3_writephy(tp, 0x13, 0x0400);
2353         udelay(40);
2354         tg3_writephy(tp, 0x13, 0x0000);
2355
2356         tg3_writephy(tp, 0x11, 0x0a50);
2357         udelay(40);
2358         tg3_writephy(tp, 0x11, 0x0a10);
2359
2360         /* Wait for signal to stabilize */
2361         /* XXX schedule_timeout() ... */
2362         for (i = 0; i < 15000; i++)
2363                 udelay(10);
2364
2365         /* Deselect the channel register so we can read the PHYID
2366          * later.
2367          */
2368         tg3_writephy(tp, 0x10, 0x8011);
2369 }
2370
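/* Fiber link setup using the MAC's hardware SG_DIG autonegotiation
 * block.  On chips other than 5704 A0/A1 a SERDES_CFG workaround is
 * applied around restarting negotiation; if the partner never answers
 * but PCS sync is present without incoming config code words, the
 * link is accepted via parallel detection.
 */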
2371 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2372 {
2373         u32 sg_dig_ctrl, sg_dig_status;
2374         u32 serdes_cfg, expected_sg_dig_ctrl;
2375         int workaround, port_a;
2376         int current_link_up;
2377
2378         serdes_cfg = 0;
2379         expected_sg_dig_ctrl = 0;
2380         workaround = 0;
2381         port_a = 1;
2382         current_link_up = 0;
2383
2384         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2385             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2386                 workaround = 1;
2387                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2388                         port_a = 0;
2389
2390                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2391                 /* preserve bits 20-23 for voltage regulator */
2392                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2393         }
2394
2395         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2396
2397         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2398                 if (sg_dig_ctrl & (1 << 31)) {
2399                         if (workaround) {
2400                                 u32 val = serdes_cfg;
2401
2402                                 if (port_a)
2403                                         val |= 0xc010000;
2404                                 else
2405                                         val |= 0x4010000;
2406                                 tw32_f(MAC_SERDES_CFG, val);
2407                         }
2408                         tw32_f(SG_DIG_CTRL, 0x01388400);
2409                 }
2410                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2411                         tg3_setup_flow_control(tp, 0, 0);
2412                         current_link_up = 1;
2413                 }
2414                 goto out;
2415         }
2416
2417         /* Want auto-negotiation.  */
2418         expected_sg_dig_ctrl = 0x81388400;
2419
2420         /* Pause capability */
2421         expected_sg_dig_ctrl |= (1 << 11);
2422
2423         /* Asymmetric pause */
2424         expected_sg_dig_ctrl |= (1 << 12);
2425
2426         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2427                 if (workaround)
2428                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2429                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2430                 udelay(5);
2431                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2432
2433                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2434         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2435                                  MAC_STATUS_SIGNAL_DET)) {
2436                 int i;
2437
2438                 /* Give time to negotiate (~200ms) */
2439                 for (i = 0; i < 40000; i++) {
2440                         sg_dig_status = tr32(SG_DIG_STATUS);
2441                         if (sg_dig_status & (0x3))
2442                                 break;
2443                         udelay(5);
2444                 }
2445                 mac_status = tr32(MAC_STATUS);
2446
2447                 if ((sg_dig_status & (1 << 1)) &&
2448                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2449                         u32 local_adv, remote_adv;
2450
2451                         local_adv = ADVERTISE_PAUSE_CAP;
2452                         remote_adv = 0;
2453                         if (sg_dig_status & (1 << 19))
2454                                 remote_adv |= LPA_PAUSE_CAP;
2455                         if (sg_dig_status & (1 << 20))
2456                                 remote_adv |= LPA_PAUSE_ASYM;
2457
2458                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2459                         current_link_up = 1;
2460                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2461                 } else if (!(sg_dig_status & (1 << 1))) {
2462                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2463                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2464                         else {
2465                                 if (workaround) {
2466                                         u32 val = serdes_cfg;
2467
2468                                         if (port_a)
2469                                                 val |= 0xc010000;
2470                                         else
2471                                                 val |= 0x4010000;
2472
2473                                         tw32_f(MAC_SERDES_CFG, val);
2474                                 }
2475
2476                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2477                                 udelay(40);
2478
2479                                 /* Link parallel detection: the link is up
2480                                  * only if we have PCS_SYNC and are not
2481                                  * receiving config code words. */
2482                                 mac_status = tr32(MAC_STATUS);
2483                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2484                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2485                                         tg3_setup_flow_control(tp, 0, 0);
2486                                         current_link_up = 1;
2487                                 }
2488                         }
2489                 }
2490         }
2491
2492 out:
2493         return current_link_up;
2494 }
2495
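/* Fiber link setup without the SG_DIG block: either run the software
 * Clause 37 state machine above, or, with autonegotiation disabled,
 * force a 1000FD link once PCS sync is seen.
 */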
2496 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2497 {
2498         int current_link_up = 0;
2499
2500         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2501                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2502                 goto out;
2503         }
2504
2505         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2506                 u32 flags;
2507                 int i;
2508
2509                 if (fiber_autoneg(tp, &flags)) {
2510                         u32 local_adv, remote_adv;
2511
2512                         local_adv = ADVERTISE_PAUSE_CAP;
2513                         remote_adv = 0;
2514                         if (flags & MR_LP_ADV_SYM_PAUSE)
2515                                 remote_adv |= LPA_PAUSE_CAP;
2516                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2517                                 remote_adv |= LPA_PAUSE_ASYM;
2518
2519                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2520
2521                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2522                         current_link_up = 1;
2523                 }
2524                 for (i = 0; i < 30; i++) {
2525                         udelay(20);
2526                         tw32_f(MAC_STATUS,
2527                                (MAC_STATUS_SYNC_CHANGED |
2528                                 MAC_STATUS_CFG_CHANGED));
2529                         udelay(40);
2530                         if ((tr32(MAC_STATUS) &
2531                              (MAC_STATUS_SYNC_CHANGED |
2532                               MAC_STATUS_CFG_CHANGED)) == 0)
2533                                 break;
2534                 }
2535
2536                 mac_status = tr32(MAC_STATUS);
2537                 if (current_link_up == 0 &&
2538                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2539                     !(mac_status & MAC_STATUS_RCVD_CFG))
2540                         current_link_up = 1;
2541         } else {
2542                 /* Forcing 1000FD link up. */
2543                 current_link_up = 1;
2544                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2545
2546                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2547                 udelay(40);
2548         }
2549
2550 out:
2551         return current_link_up;
2552 }
2553
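/* Top-level fiber (TBI) link setup: put the MAC into TBI port mode,
 * run either the hardware or the by-hand autonegotiation path above,
 * then update carrier state, LEDs and the pause configuration report.
 */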
2554 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2555 {
2556         u32 orig_pause_cfg;
2557         u16 orig_active_speed;
2558         u8 orig_active_duplex;
2559         u32 mac_status;
2560         int current_link_up;
2561         int i;
2562
2563         orig_pause_cfg =
2564                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2565                                   TG3_FLAG_TX_PAUSE));
2566         orig_active_speed = tp->link_config.active_speed;
2567         orig_active_duplex = tp->link_config.active_duplex;
2568
2569         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2570             netif_carrier_ok(tp->dev) &&
2571             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2572                 mac_status = tr32(MAC_STATUS);
2573                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2574                                MAC_STATUS_SIGNAL_DET |
2575                                MAC_STATUS_CFG_CHANGED |
2576                                MAC_STATUS_RCVD_CFG);
2577                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2578                                    MAC_STATUS_SIGNAL_DET)) {
2579                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2580                                             MAC_STATUS_CFG_CHANGED));
2581                         return 0;
2582                 }
2583         }
2584
2585         tw32_f(MAC_TX_AUTO_NEG, 0);
2586
2587         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2588         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2589         tw32_f(MAC_MODE, tp->mac_mode);
2590         udelay(40);
2591
2592         if (tp->phy_id == PHY_ID_BCM8002)
2593                 tg3_init_bcm8002(tp);
2594
2595         /* Enable link change event even when serdes polling.  */
2596         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2597         udelay(40);
2598
2599         current_link_up = 0;
2600         mac_status = tr32(MAC_STATUS);
2601
2602         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2603                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2604         else
2605                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2606
2607         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2608         tw32_f(MAC_MODE, tp->mac_mode);
2609         udelay(40);
2610
2611         tp->hw_status->status =
2612                 (SD_STATUS_UPDATED |
2613                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2614
2615         for (i = 0; i < 100; i++) {
2616                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2617                                     MAC_STATUS_CFG_CHANGED));
2618                 udelay(5);
2619                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2620                                          MAC_STATUS_CFG_CHANGED)) == 0)
2621                         break;
2622         }
2623
2624         mac_status = tr32(MAC_STATUS);
2625         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2626                 current_link_up = 0;
2627                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2628                         tw32_f(MAC_MODE, (tp->mac_mode |
2629                                           MAC_MODE_SEND_CONFIGS));
2630                         udelay(1);
2631                         tw32_f(MAC_MODE, tp->mac_mode);
2632                 }
2633         }
2634
2635         if (current_link_up == 1) {
2636                 tp->link_config.active_speed = SPEED_1000;
2637                 tp->link_config.active_duplex = DUPLEX_FULL;
2638                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2639                                     LED_CTRL_LNKLED_OVERRIDE |
2640                                     LED_CTRL_1000MBPS_ON));
2641         } else {
2642                 tp->link_config.active_speed = SPEED_INVALID;
2643                 tp->link_config.active_duplex = DUPLEX_INVALID;
2644                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2645                                     LED_CTRL_LNKLED_OVERRIDE |
2646                                     LED_CTRL_TRAFFIC_OVERRIDE));
2647         }
2648
2649         if (current_link_up != netif_carrier_ok(tp->dev)) {
2650                 if (current_link_up)
2651                         netif_carrier_on(tp->dev);
2652                 else
2653                         netif_carrier_off(tp->dev);
2654                 tg3_link_report(tp);
2655         } else {
2656                 u32 now_pause_cfg =
2657                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2658                                          TG3_FLAG_TX_PAUSE);
2659                 if (orig_pause_cfg != now_pause_cfg ||
2660                     orig_active_speed != tp->link_config.active_speed ||
2661                     orig_active_duplex != tp->link_config.active_duplex)
2662                         tg3_link_report(tp);
2663         }
2664
2665         return 0;
2666 }
2667
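/* Link setup for SERDES devices managed through MII-style registers
 * (1000BaseX ability bits in MII_ADVERTISE/MII_LPA).  Note that on
 * 5714 the link indication is taken from the MAC's TX_STATUS_LINK_UP
 * bit rather than trusting BMSR_LSTATUS alone.
 */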
2668 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2669 {
2670         int current_link_up, err = 0;
2671         u32 bmsr, bmcr;
2672         u16 current_speed;
2673         u8 current_duplex;
2674
2675         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2676         tw32_f(MAC_MODE, tp->mac_mode);
2677         udelay(40);
2678
2679         tw32(MAC_EVENT, 0);
2680
2681         tw32_f(MAC_STATUS,
2682              (MAC_STATUS_SYNC_CHANGED |
2683               MAC_STATUS_CFG_CHANGED |
2684               MAC_STATUS_MI_COMPLETION |
2685               MAC_STATUS_LNKSTATE_CHANGED));
2686         udelay(40);
2687
2688         if (force_reset)
2689                 tg3_phy_reset(tp);
2690
2691         current_link_up = 0;
2692         current_speed = SPEED_INVALID;
2693         current_duplex = DUPLEX_INVALID;
2694
2695         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2696         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2697         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2698                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2699                         bmsr |= BMSR_LSTATUS;
2700                 else
2701                         bmsr &= ~BMSR_LSTATUS;
2702         }
2703
2704         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2705
2706         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2707             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2708                 /* do nothing, just check for link up at the end */
2709         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2710                 u32 adv, new_adv;
2711
2712                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2713                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2714                                   ADVERTISE_1000XPAUSE |
2715                                   ADVERTISE_1000XPSE_ASYM |
2716                                   ADVERTISE_SLCT);
2717
2718                 /* Always advertise symmetric PAUSE just like copper */
2719                 new_adv |= ADVERTISE_1000XPAUSE;
2720
2721                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2722                         new_adv |= ADVERTISE_1000XHALF;
2723                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2724                         new_adv |= ADVERTISE_1000XFULL;
2725
2726                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2727                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2728                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2729                         tg3_writephy(tp, MII_BMCR, bmcr);
2730
2731                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2732                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2733                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2734
2735                         return err;
2736                 }
2737         } else {
2738                 u32 new_bmcr;
2739
2740                 bmcr &= ~BMCR_SPEED1000;
2741                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2742
2743                 if (tp->link_config.duplex == DUPLEX_FULL)
2744                         new_bmcr |= BMCR_FULLDPLX;
2745
2746                 if (new_bmcr != bmcr) {
2747                         /* BMCR_SPEED1000 is a reserved bit that needs
2748                          * to be set on write.
2749                          */
2750                         new_bmcr |= BMCR_SPEED1000;
2751
2752                         /* Force a linkdown */
2753                         if (netif_carrier_ok(tp->dev)) {
2754                                 u32 adv;
2755
2756                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2757                                 adv &= ~(ADVERTISE_1000XFULL |
2758                                          ADVERTISE_1000XHALF |
2759                                          ADVERTISE_SLCT);
2760                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2761                                 tg3_writephy(tp, MII_BMCR, bmcr |
2762                                                            BMCR_ANRESTART |
2763                                                            BMCR_ANENABLE);
2764                                 udelay(10);
2765                                 netif_carrier_off(tp->dev);
2766                         }
2767                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2768                         bmcr = new_bmcr;
2769                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2770                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2771                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2772                             ASIC_REV_5714) {
2773                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2774                                         bmsr |= BMSR_LSTATUS;
2775                                 else
2776                                         bmsr &= ~BMSR_LSTATUS;
2777                         }
2778                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2779                 }
2780         }
2781
2782         if (bmsr & BMSR_LSTATUS) {
2783                 current_speed = SPEED_1000;
2784                 current_link_up = 1;
2785                 if (bmcr & BMCR_FULLDPLX)
2786                         current_duplex = DUPLEX_FULL;
2787                 else
2788                         current_duplex = DUPLEX_HALF;
2789
2790                 if (bmcr & BMCR_ANENABLE) {
2791                         u32 local_adv, remote_adv, common;
2792
2793                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2794                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2795                         common = local_adv & remote_adv;
2796                         if (common & (ADVERTISE_1000XHALF |
2797                                       ADVERTISE_1000XFULL)) {
2798                                 if (common & ADVERTISE_1000XFULL)
2799                                         current_duplex = DUPLEX_FULL;
2800                                 else
2801                                         current_duplex = DUPLEX_HALF;
2802
2803                                 tg3_setup_flow_control(tp, local_adv,
2804                                                        remote_adv);
2805                         }
2806                         else
2807                                 current_link_up = 0;
2808                 }
2809         }
2810
2811         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2812         if (tp->link_config.active_duplex == DUPLEX_HALF)
2813                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2814
2815         tw32_f(MAC_MODE, tp->mac_mode);
2816         udelay(40);
2817
2818         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2819
2820         tp->link_config.active_speed = current_speed;
2821         tp->link_config.active_duplex = current_duplex;
2822
2823         if (current_link_up != netif_carrier_ok(tp->dev)) {
2824                 if (current_link_up)
2825                         netif_carrier_on(tp->dev);
2826                 else {
2827                         netif_carrier_off(tp->dev);
2828                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2829                 }
2830                 tg3_link_report(tp);
2831         }
2832         return err;
2833 }
2834
2835 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2836 {
2837         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2838                 /* Give autoneg time to complete. */
2839                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2840                 return;
2841         }
2842         if (!netif_carrier_ok(tp->dev) &&
2843             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2844                 u32 bmcr;
2845
2846                 tg3_readphy(tp, MII_BMCR, &bmcr);
2847                 if (bmcr & BMCR_ANENABLE) {
2848                         u32 phy1, phy2;
2849
2850                         /* Select shadow register 0x1f */
2851                         tg3_writephy(tp, 0x1c, 0x7c00);
2852                         tg3_readphy(tp, 0x1c, &phy1);
2853
2854                         /* Select expansion interrupt status register */
2855                         tg3_writephy(tp, 0x17, 0x0f01);
2856                         tg3_readphy(tp, 0x15, &phy2);
2857                         tg3_readphy(tp, 0x15, &phy2);
2858
2859                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2860                                 /* We have signal detect and are not receiving
2861                                  * config code words, so the link is up by parallel
2862                                  * detection.
2863                                  */
2864
2865                                 bmcr &= ~BMCR_ANENABLE;
2866                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2867                                 tg3_writephy(tp, MII_BMCR, bmcr);
2868                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2869                         }
2870                 }
2871         }
2872         else if (netif_carrier_ok(tp->dev) &&
2873                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2874                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2875                 u32 phy2;
2876
2877                 /* Select expansion interrupt status register */
2878                 tg3_writephy(tp, 0x17, 0x0f01);
2879                 tg3_readphy(tp, 0x15, &phy2);
2880                 if (phy2 & 0x20) {
2881                         u32 bmcr;
2882
2883                         /* Config code words received, turn on autoneg. */
2884                         tg3_readphy(tp, MII_BMCR, &bmcr);
2885                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2886
2887                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2888
2889                 }
2890         }
2891 }
2892
2893 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2894 {
2895         int err;
2896
2897         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2898                 err = tg3_setup_fiber_phy(tp, force_reset);
2899         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2900                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2901         } else {
2902                 err = tg3_setup_copper_phy(tp, force_reset);
2903         }
2904
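        /* The two MAC_TX_LENGTHS values below differ only in the
         * SLOT_TIME field (0xff vs. 32); the larger value accounts for
         * the extended slot time that half-duplex gigabit operation
         * requires.
         */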
2905         if (tp->link_config.active_speed == SPEED_1000 &&
2906             tp->link_config.active_duplex == DUPLEX_HALF)
2907                 tw32(MAC_TX_LENGTHS,
2908                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2909                       (6 << TX_LENGTHS_IPG_SHIFT) |
2910                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2911         else
2912                 tw32(MAC_TX_LENGTHS,
2913                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2914                       (6 << TX_LENGTHS_IPG_SHIFT) |
2915                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2916
2917         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2918                 if (netif_carrier_ok(tp->dev)) {
2919                         tw32(HOSTCC_STAT_COAL_TICKS,
2920                              tp->coal.stats_block_coalesce_usecs);
2921                 } else {
2922                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2923                 }
2924         }
2925
2926         return err;
2927 }
2928
2929 /* Tigon3 never reports partial packet sends.  So we do not
2930  * need special logic to handle SKBs that have not had all
2931  * of their frags sent yet, like SunGEM does.
2932  */
2933 static void tg3_tx(struct tg3 *tp)
2934 {
2935         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2936         u32 sw_idx = tp->tx_cons;
2937
2938         while (sw_idx != hw_idx) {
2939                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2940                 struct sk_buff *skb = ri->skb;
2941                 int i;
2942
2943                 if (unlikely(skb == NULL))
2944                         BUG();
2945
2946                 pci_unmap_single(tp->pdev,
2947                                  pci_unmap_addr(ri, mapping),
2948                                  skb_headlen(skb),
2949                                  PCI_DMA_TODEVICE);
2950
2951                 ri->skb = NULL;
2952
2953                 sw_idx = NEXT_TX(sw_idx);
2954
2955                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2956                         if (unlikely(sw_idx == hw_idx))
2957                                 BUG();
2958
2959                         ri = &tp->tx_buffers[sw_idx];
2960                         if (unlikely(ri->skb != NULL))
2961                                 BUG();
2962
2963                         pci_unmap_page(tp->pdev,
2964                                        pci_unmap_addr(ri, mapping),
2965                                        skb_shinfo(skb)->frags[i].size,
2966                                        PCI_DMA_TODEVICE);
2967
2968                         sw_idx = NEXT_TX(sw_idx);
2969                 }
2970
2971                 dev_kfree_skb(skb);
2972         }
2973
2974         tp->tx_cons = sw_idx;
2975
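        /* Wake the queue only if it was stopped; re-checking under
         * tx_lock closes the race with tg3_start_xmit(), which stops
         * the queue while holding the same lock.
         */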
2976         if (unlikely(netif_queue_stopped(tp->dev))) {
2977                 spin_lock(&tp->tx_lock);
2978                 if (netif_queue_stopped(tp->dev) &&
2979                     (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2980                         netif_wake_queue(tp->dev);
2981                 spin_unlock(&tp->tx_lock);
2982         }
2983 }
2984
2985 /* Returns size of skb allocated or < 0 on error.
2986  *
2987  * We only need to fill in the address because the other members
2988  * of the RX descriptor are invariant, see tg3_init_rings.
2989  *
2990  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2991  * posting buffers we only dirty the first cache line of the RX
2992  * descriptor (containing the address).  Whereas for the RX status
2993  * buffers the cpu only reads the last cacheline of the RX descriptor
2994  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2995  */
2996 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2997                             int src_idx, u32 dest_idx_unmasked)
2998 {
2999         struct tg3_rx_buffer_desc *desc;
3000         struct ring_info *map, *src_map;
3001         struct sk_buff *skb;
3002         dma_addr_t mapping;
3003         int skb_size, dest_idx;
3004
3005         src_map = NULL;
3006         switch (opaque_key) {
3007         case RXD_OPAQUE_RING_STD:
3008                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3009                 desc = &tp->rx_std[dest_idx];
3010                 map = &tp->rx_std_buffers[dest_idx];
3011                 if (src_idx >= 0)
3012                         src_map = &tp->rx_std_buffers[src_idx];
3013                 skb_size = tp->rx_pkt_buf_sz;
3014                 break;
3015
3016         case RXD_OPAQUE_RING_JUMBO:
3017                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3018                 desc = &tp->rx_jumbo[dest_idx];
3019                 map = &tp->rx_jumbo_buffers[dest_idx];
3020                 if (src_idx >= 0)
3021                         src_map = &tp->rx_jumbo_buffers[src_idx];
3022                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3023                 break;
3024
3025         default:
3026                 return -EINVAL;
3027         }
3028
3029         /* Do not overwrite any of the map or rp information
3030          * until we are sure we can commit to a new buffer.
3031          *
3032          * Callers depend upon this behavior and assume that
3033          * we leave everything unchanged if we fail.
3034          */
3035         skb = dev_alloc_skb(skb_size);
3036         if (skb == NULL)
3037                 return -ENOMEM;
3038
3039         skb->dev = tp->dev;
3040         skb_reserve(skb, tp->rx_offset);
3041
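        /* skb->data was advanced by rx_offset above, so only the
         * remaining skb_size - rx_offset bytes are mapped for the NIC;
         * the same length is used when unmapping in tg3_rx() and
         * tg3_free_rings().
         */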
3042         mapping = pci_map_single(tp->pdev, skb->data,
3043                                  skb_size - tp->rx_offset,
3044                                  PCI_DMA_FROMDEVICE);
3045
3046         map->skb = skb;
3047         pci_unmap_addr_set(map, mapping, mapping);
3048
3049         if (src_map != NULL)
3050                 src_map->skb = NULL;
3051
3052         desc->addr_hi = ((u64)mapping >> 32);
3053         desc->addr_lo = ((u64)mapping & 0xffffffff);
3054
3055         return skb_size;
3056 }
3057
3058 /* We only need to copy over the address because the other
3059  * members of the RX descriptor are invariant.  See notes above
3060  * tg3_alloc_rx_skb for full details.
3061  */
3062 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3063                            int src_idx, u32 dest_idx_unmasked)
3064 {
3065         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3066         struct ring_info *src_map, *dest_map;
3067         int dest_idx;
3068
3069         switch (opaque_key) {
3070         case RXD_OPAQUE_RING_STD:
3071                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3072                 dest_desc = &tp->rx_std[dest_idx];
3073                 dest_map = &tp->rx_std_buffers[dest_idx];
3074                 src_desc = &tp->rx_std[src_idx];
3075                 src_map = &tp->rx_std_buffers[src_idx];
3076                 break;
3077
3078         case RXD_OPAQUE_RING_JUMBO:
3079                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3080                 dest_desc = &tp->rx_jumbo[dest_idx];
3081                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3082                 src_desc = &tp->rx_jumbo[src_idx];
3083                 src_map = &tp->rx_jumbo_buffers[src_idx];
3084                 break;
3085
3086         default:
3087                 return;
3088         }
3089
3090         dest_map->skb = src_map->skb;
3091         pci_unmap_addr_set(dest_map, mapping,
3092                            pci_unmap_addr(src_map, mapping));
3093         dest_desc->addr_hi = src_desc->addr_hi;
3094         dest_desc->addr_lo = src_desc->addr_lo;
3095
3096         src_map->skb = NULL;
3097 }
3098
3099 #if TG3_VLAN_TAG_USED
3100 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3101 {
3102         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3103 }
3104 #endif
3105
3106 /* The RX ring scheme is composed of multiple rings which post fresh
3107  * buffers to the chip, and one special ring the chip uses to report
3108  * status back to the host.
3109  *
3110  * The special ring reports the status of received packets to the
3111  * host.  The chip does not write into the original descriptor the
3112  * RX buffer was obtained from.  The chip simply takes the original
3113  * descriptor as provided by the host, updates the status and length
3114  * field, then writes this into the next status ring entry.
3115  *
3116  * Each ring the host uses to post buffers to the chip is described
3117  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3118  * it is first placed into the on-chip RAM.  When the packet's length
3119  * is known, the chip walks down the TG3_BDINFO entries to select the ring.
3120  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
3121  * whose MAXLEN covers the new packet's length is chosen.
3122  *
3123  * The "separate ring for rx status" scheme may sound queer, but it makes
3124  * sense from a cache coherency perspective.  If only the host writes
3125  * to the buffer post rings, and only the chip writes to the rx status
3126  * rings, then cache lines never move beyond shared-modified state.
3127  * If both the host and chip were to write into the same ring, cache line
3128  * eviction could occur since both entities want it in an exclusive state.
3129  */
3130 static int tg3_rx(struct tg3 *tp, int budget)
3131 {
3132         u32 work_mask;
3133         u32 sw_idx = tp->rx_rcb_ptr;
3134         u16 hw_idx;
3135         int received;
3136
3137         hw_idx = tp->hw_status->idx[0].rx_producer;
3138         /*
3139          * We need to order the read of hw_idx and the read of
3140          * the opaque cookie.
3141          */
3142         rmb();
3143         work_mask = 0;
3144         received = 0;
3145         while (sw_idx != hw_idx && budget > 0) {
3146                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3147                 unsigned int len;
3148                 struct sk_buff *skb;
3149                 dma_addr_t dma_addr;
3150                 u32 opaque_key, desc_idx, *post_ptr;
3151
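                /* The opaque cookie round-trips through the chip
                 * unmodified: tg3_init_rings() stores the posting ring
                 * id and buffer index in it, so it tells us exactly
                 * which ring and which slot this completion refers to.
                 */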
3152                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3153                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3154                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3155                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3156                                                   mapping);
3157                         skb = tp->rx_std_buffers[desc_idx].skb;
3158                         post_ptr = &tp->rx_std_ptr;
3159                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3160                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3161                                                   mapping);
3162                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3163                         post_ptr = &tp->rx_jumbo_ptr;
3164                 }
3165                 else {
3166                         goto next_pkt_nopost;
3167                 }
3168
3169                 work_mask |= opaque_key;
3170
3171                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3172                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3173                 drop_it:
3174                         tg3_recycle_rx(tp, opaque_key,
3175                                        desc_idx, *post_ptr);
3176                 drop_it_no_recycle:
3177                         /* Other statistics kept track of by card. */
3178                         tp->net_stats.rx_dropped++;
3179                         goto next_pkt;
3180                 }
3181
3182                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3183
3184                 /* rx_offset != 2 iff this is a 5701 card running
3185                  * in PCI-X mode [see tg3_get_invariants()]
3186                  */
3187                 if (len > RX_COPY_THRESHOLD &&
3188                     tp->rx_offset == 2) {
3189                         int skb_size;
3190
3191                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3192                                                     desc_idx, *post_ptr);
3193                         if (skb_size < 0)
3194                                 goto drop_it;
3195
3196                         pci_unmap_single(tp->pdev, dma_addr,
3197                                          skb_size - tp->rx_offset,
3198                                          PCI_DMA_FROMDEVICE);
3199
3200                         skb_put(skb, len);
3201                 } else {
3202                         struct sk_buff *copy_skb;
3203
3204                         tg3_recycle_rx(tp, opaque_key,
3205                                        desc_idx, *post_ptr);
3206
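                        /* Reserving 2 bytes puts the IP header of the
                         * copied packet on a 4-byte boundary (14-byte
                         * Ethernet header + 2), mirroring the rx_offset
                         * alignment of the original buffer.
                         */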
3207                         copy_skb = dev_alloc_skb(len + 2);
3208                         if (copy_skb == NULL)
3209                                 goto drop_it_no_recycle;
3210
3211                         copy_skb->dev = tp->dev;
3212                         skb_reserve(copy_skb, 2);
3213                         skb_put(copy_skb, len);
3214                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3215                         memcpy(copy_skb->data, skb->data, len);
3216                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3217
3218                         /* We'll reuse the original ring buffer. */
3219                         skb = copy_skb;
3220                 }
3221
3222                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3223                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3224                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3225                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3226                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3227                 else
3228                         skb->ip_summed = CHECKSUM_NONE;
3229
3230                 skb->protocol = eth_type_trans(skb, tp->dev);
3231 #if TG3_VLAN_TAG_USED
3232                 if (tp->vlgrp != NULL &&
3233                     desc->type_flags & RXD_FLAG_VLAN) {
3234                         tg3_vlan_rx(tp, skb,
3235                                     desc->err_vlan & RXD_VLAN_MASK);
3236                 } else
3237 #endif
3238                         netif_receive_skb(skb);
3239
3240                 tp->dev->last_rx = jiffies;
3241                 received++;
3242                 budget--;
3243
3244 next_pkt:
3245                 (*post_ptr)++;
3246 next_pkt_nopost:
3247                 sw_idx++;
3248                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3249
3250                 /* Refresh hw_idx to see if there is new work */
3251                 if (sw_idx == hw_idx) {
3252                         hw_idx = tp->hw_status->idx[0].rx_producer;
3253                         rmb();
3254                 }
3255         }
3256
3257         /* ACK the status ring. */
3258         tp->rx_rcb_ptr = sw_idx;
3259         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3260
3261         /* Refill RX ring(s). */
3262         if (work_mask & RXD_OPAQUE_RING_STD) {
3263                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3264                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3265                              sw_idx);
3266         }
3267         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3268                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3269                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3270                              sw_idx);
3271         }
3272         mmiowb();
3273
3274         return received;
3275 }
3276
3277 static int tg3_poll(struct net_device *netdev, int *budget)
3278 {
3279         struct tg3 *tp = netdev_priv(netdev);
3280         struct tg3_hw_status *sblk = tp->hw_status;
3281         int done;
3282
3283         /* handle link change and other phy events */
3284         if (!(tp->tg3_flags &
3285               (TG3_FLAG_USE_LINKCHG_REG |
3286                TG3_FLAG_POLL_SERDES))) {
3287                 if (sblk->status & SD_STATUS_LINK_CHG) {
3288                         sblk->status = SD_STATUS_UPDATED |
3289                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3290                         spin_lock(&tp->lock);
3291                         tg3_setup_phy(tp, 0);
3292                         spin_unlock(&tp->lock);
3293                 }
3294         }
3295
3296         /* run TX completion thread */
3297         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3298                 tg3_tx(tp);
3299         }
3300
3301         /* run RX thread, within the bounds set by NAPI.
3302          * All RX "locking" is done by ensuring outside
3303          * code synchronizes with dev->poll()
3304          */
3305         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3306                 int orig_budget = *budget;
3307                 int work_done;
3308
3309                 if (orig_budget > netdev->quota)
3310                         orig_budget = netdev->quota;
3311
3312                 work_done = tg3_rx(tp, orig_budget);
3313
3314                 *budget -= work_done;
3315                 netdev->quota -= work_done;
3316         }
3317
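        /* Remember the tag of the status block just serviced;
         * tg3_interrupt_tagged() compares sblk->status_tag against
         * tp->last_tag to decide whether a new status block has been
         * posted since this poll.
         */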
3318         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3319                 tp->last_tag = sblk->status_tag;
3320                 rmb();
3321         } else
3322                 sblk->status &= ~SD_STATUS_UPDATED;
3323
3324         /* if no more work, tell net stack and NIC we're done */
3325         done = !tg3_has_work(tp);
3326         if (done) {
3327                 netif_rx_complete(netdev);
3328                 tg3_restart_ints(tp);
3329         }
3330
3331         return (done ? 0 : 1);
3332 }
3333
3334 static void tg3_irq_quiesce(struct tg3 *tp)
3335 {
3336         BUG_ON(tp->irq_sync);
3337
3338         tp->irq_sync = 1;
3339         smp_mb();
3340
3341         synchronize_irq(tp->pdev->irq);
3342 }
3343
3344 static inline int tg3_irq_sync(struct tg3 *tp)
3345 {
3346         return tp->irq_sync;
3347 }
3348
3349 /* Fully shut down all tg3 driver activity elsewhere in the system.
3350  * If irq_sync is non-zero, the IRQ handler is quiesced and
3351  * synchronized as well.  Most of the time this is only necessary
3352  * when shutting down the device.
3353  */
3354 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3355 {
3356         if (irq_sync)
3357                 tg3_irq_quiesce(tp);
3358         spin_lock_bh(&tp->lock);
3359         spin_lock(&tp->tx_lock);
3360 }
3361
3362 static inline void tg3_full_unlock(struct tg3 *tp)
3363 {
3364         spin_unlock(&tp->tx_lock);
3365         spin_unlock_bh(&tp->lock);
3366 }
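/* Lock ordering: tp->lock is always taken before tp->tx_lock and
 * released after it.  tg3_start_xmit() needs only tx_lock, so it
 * spin_trylock()s it directly and returns NETDEV_TX_LOCKED when the
 * lock is contended.
 */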
3367
3368 /* MSI ISR - No need to check for interrupt sharing and no need to
3369  * flush status block and interrupt mailbox. PCI ordering rules
3370  * guarantee that MSI will arrive after the status block.
3371  */
3372 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3373 {
3374         struct net_device *dev = dev_id;
3375         struct tg3 *tp = netdev_priv(dev);
3376
3377         prefetch(tp->hw_status);
3378         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3379         /*
3380          * Writing any value to intr-mbox-0 clears PCI INTA# and
3381          * chip-internal interrupt pending events.
3382          * Writing non-zero to intr-mbox-0 additionally tells the
3383          * NIC to stop sending us irqs, engaging "in-intr-handler"
3384          * event coalescing.
3385          */
3386         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3387         if (likely(!tg3_irq_sync(tp)))
3388                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3389
3390         return IRQ_RETVAL(1);
3391 }
3392
3393 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3394 {
3395         struct net_device *dev = dev_id;
3396         struct tg3 *tp = netdev_priv(dev);
3397         struct tg3_hw_status *sblk = tp->hw_status;
3398         unsigned int handled = 1;
3399
3400         /* In INTx mode, it is possible for the interrupt to arrive at
3401          * the CPU before the status block write posted prior to the interrupt.
3402          * Reading the PCI State register will confirm whether the
3403          * interrupt is ours and will flush the status block.
3404          */
3405         if ((sblk->status & SD_STATUS_UPDATED) ||
3406             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3407                 /*
3408                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3409                  * chip-internal interrupt pending events.
3410          * Writing non-zero to intr-mbox-0 additionally tells the
3411                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3412                  * event coalescing.
3413                  */
3414                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3415                              0x00000001);
3416                 if (tg3_irq_sync(tp))
3417                         goto out;
3418                 sblk->status &= ~SD_STATUS_UPDATED;
3419                 if (likely(tg3_has_work(tp))) {
3420                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3421                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3422                 } else {
3423                         /* No work, shared interrupt perhaps?  re-enable
3424                          * interrupts, and flush that PCI write
3425                          */
3426                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3427                                 0x00000000);
3428                 }
3429         } else {        /* shared interrupt */
3430                 handled = 0;
3431         }
3432 out:
3433         return IRQ_RETVAL(handled);
3434 }
3435
3436 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3437 {
3438         struct net_device *dev = dev_id;
3439         struct tg3 *tp = netdev_priv(dev);
3440         struct tg3_hw_status *sblk = tp->hw_status;
3441         unsigned int handled = 1;
3442
3443         /* In INTx mode, it is possible for the interrupt to arrive at
3444          * the CPU before the status block write posted prior to the interrupt.
3445          * Reading the PCI State register will confirm whether the
3446          * interrupt is ours and will flush the status block.
3447          */
3448         if ((sblk->status_tag != tp->last_tag) ||
3449             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3450                 /*
3451                  * writing any value to intr-mbox-0 clears PCI INTA# and
3452                  * chip-internal interrupt pending events.
3453          * writing non-zero to intr-mbox-0 additionally tells the
3454                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3455                  * event coalescing.
3456                  */
3457                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3458                              0x00000001);
3459                 if (tg3_irq_sync(tp))
3460                         goto out;
3461                 if (netif_rx_schedule_prep(dev)) {
3462                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3463                         /* Update last_tag to mark that this status has been
3464                          * seen. Because interrupt may be shared, we may be
3465                          * racing with tg3_poll(), so only update last_tag
3466                          * if tg3_poll() is not scheduled.
3467                          */
3468                         tp->last_tag = sblk->status_tag;
3469                         __netif_rx_schedule(dev);
3470                 }
3471         } else {        /* shared interrupt */
3472                 handled = 0;
3473         }
3474 out:
3475         return IRQ_RETVAL(handled);
3476 }
3477
3478 /* ISR for interrupt test */
3479 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3480                 struct pt_regs *regs)
3481 {
3482         struct net_device *dev = dev_id;
3483         struct tg3 *tp = netdev_priv(dev);
3484         struct tg3_hw_status *sblk = tp->hw_status;
3485
3486         if ((sblk->status & SD_STATUS_UPDATED) ||
3487             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3488                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3489                              0x00000001);
3490                 return IRQ_RETVAL(1);
3491         }
3492         return IRQ_RETVAL(0);
3493 }
3494
3495 static int tg3_init_hw(struct tg3 *);
3496 static int tg3_halt(struct tg3 *, int, int);
3497
3498 #ifdef CONFIG_NET_POLL_CONTROLLER
3499 static void tg3_poll_controller(struct net_device *dev)
3500 {
3501         struct tg3 *tp = netdev_priv(dev);
3502
3503         tg3_interrupt(tp->pdev->irq, dev, NULL);
3504 }
3505 #endif
3506
3507 static void tg3_reset_task(void *_data)
3508 {
3509         struct tg3 *tp = _data;
3510         unsigned int restart_timer;
3511
3512         tg3_full_lock(tp, 0);
3513         tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3514
3515         if (!netif_running(tp->dev)) {
3516                 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3517                 tg3_full_unlock(tp);
3518                 return;
3519         }
3520
3521         tg3_full_unlock(tp);
3522
3523         tg3_netif_stop(tp);
3524
3525         tg3_full_lock(tp, 1);
3526
3527         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3528         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3529
3530         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3531         tg3_init_hw(tp);
3532
3533         tg3_netif_start(tp);
3534
3535         if (restart_timer)
3536                 mod_timer(&tp->timer, jiffies + 1);
3537
3538         tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3539
3540         tg3_full_unlock(tp);
3541 }
3542
3543 static void tg3_tx_timeout(struct net_device *dev)
3544 {
3545         struct tg3 *tp = netdev_priv(dev);
3546
3547         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3548                dev->name);
3549
3550         schedule_work(&tp->reset_task);
3551 }
3552
3553 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3554 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3555 {
3556         u32 base = (u32) mapping & 0xffffffff;
3557
3558         return ((base > 0xffffdcc0) &&
3559                 (base + len + 8 < base));
3560 }
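/* A worked example of the test above: with mapping = 0x1ffffff00 the
 * low 32 bits give base = 0xffffff00, which exceeds 0xffffdcc0, and
 * for len = 0x200 the 32-bit sum base + len + 8 wraps to 0x00000108,
 * which is less than base, so the buffer straddles a 4GB boundary and
 * is relinked via tigon3_dma_hwbug_workaround().  The first check is
 * just a fast filter: only a base within ~9KB (0x2340 bytes) of a 4GB
 * boundary can possibly wrap.
 */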
3561
3562 /* Test for DMA addresses > 40-bit */
3563 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3564                                           int len)
3565 {
3566 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3567         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3568                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3569         return 0;
3570 #else
3571         return 0;
3572 #endif
3573 }
3574
3575 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3576
3577 /* Workaround 4GB and 40-bit hardware DMA bugs. */
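/* The strategy: linearise the skb into a freshly allocated copy, map
 * the copy, and point the first descriptor of the affected chain at
 * it.  The original mappings are then unmapped and their ring slots
 * cleared, so tg3_tx() later frees only the new, linear skb.
 */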
3578 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3579                                        u32 last_plus_one, u32 *start,
3580                                        u32 base_flags, u32 mss)
3581 {
3582         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3583         dma_addr_t new_addr = 0;
3584         u32 entry = *start;
3585         int i, ret = 0;
3586
3587         if (!new_skb) {
3588                 ret = -1;
3589         } else {
3590                 /* New SKB is guaranteed to be linear. */
3591                 entry = *start;
3592                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3593                                           PCI_DMA_TODEVICE);
3594                 /* Make sure new skb does not cross any 4G boundaries.
3595                  * Drop the packet if it does.
3596                  */
3597                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3598                         ret = -1;
3599                         dev_kfree_skb(new_skb);
3600                         new_skb = NULL;
3601                 } else {
3602                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3603                                     base_flags, 1 | (mss << 1));
3604                         *start = NEXT_TX(entry);
3605                 }
3606         }
3607
3608         /* Now clean up the sw ring entries. */
3609         i = 0;
3610         while (entry != last_plus_one) {
3611                 int len;
3612
3613                 if (i == 0)
3614                         len = skb_headlen(skb);
3615                 else
3616                         len = skb_shinfo(skb)->frags[i-1].size;
3617                 pci_unmap_single(tp->pdev,
3618                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3619                                  len, PCI_DMA_TODEVICE);
3620                 if (i == 0) {
3621                         tp->tx_buffers[entry].skb = new_skb;
3622                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3623                 } else {
3624                         tp->tx_buffers[entry].skb = NULL;
3625                 }
3626                 entry = NEXT_TX(entry);
3627                 i++;
3628         }
3629
3630         dev_kfree_skb(skb);
3631
3632         return ret;
3633 }
3634
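/* Fill in one TX descriptor.  Callers pack mss_and_is_end as
 * ((i == last) | (mss << 1)): bit 0 marks the skb's final fragment
 * (which gets TXD_FLAG_END), and the remaining bits carry the MSS,
 * which is merged into the same descriptor word as the VLAN tag via
 * TXD_MSS_SHIFT.  A VLAN tag, when present, arrives in the upper 16
 * bits of 'flags'.
 */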
3635 static void tg3_set_txd(struct tg3 *tp, int entry,
3636                         dma_addr_t mapping, int len, u32 flags,
3637                         u32 mss_and_is_end)
3638 {
3639         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3640         int is_end = (mss_and_is_end & 0x1);
3641         u32 mss = (mss_and_is_end >> 1);
3642         u32 vlan_tag = 0;
3643
3644         if (is_end)
3645                 flags |= TXD_FLAG_END;
3646         if (flags & TXD_FLAG_VLAN) {
3647                 vlan_tag = flags >> 16;
3648                 flags &= 0xffff;
3649         }
3650         vlan_tag |= (mss << TXD_MSS_SHIFT);
3651
3652         txd->addr_hi = ((u64) mapping >> 32);
3653         txd->addr_lo = ((u64) mapping & 0xffffffff);
3654         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3655         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3656 }
3657
3658 /* hard_start_xmit for devices that don't have any bugs and
3659  * support TG3_FLG2_HW_TSO_2 only.
3660  */
3661 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3662 {
3663         struct tg3 *tp = netdev_priv(dev);
3664         dma_addr_t mapping;
3665         u32 len, entry, base_flags, mss;
3666
3667         len = skb_headlen(skb);
3668
3669         /* No BH disabling for tx_lock here.  We are running in BH disabled
3670          * context and TX reclaim runs via tp->poll inside of a software
3671          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3672          * no IRQ context deadlocks to worry about either.  Rejoice!
3673          */
3674         if (!spin_trylock(&tp->tx_lock))
3675                 return NETDEV_TX_LOCKED;
3676
3677         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3678                 if (!netif_queue_stopped(dev)) {
3679                         netif_stop_queue(dev);
3680
3681                         /* This is a hard error, log it. */
3682                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3683                                "queue awake!\n", dev->name);
3684                 }
3685                 spin_unlock(&tp->tx_lock);
3686                 return NETDEV_TX_BUSY;
3687         }
3688
3689         entry = tp->tx_prod;
3690         base_flags = 0;
3691 #if TG3_TSO_SUPPORT != 0
3692         mss = 0;
3693         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3694             (mss = skb_shinfo(skb)->tso_size) != 0) {
3695                 int tcp_opt_len, ip_tcp_len;
3696
3697                 if (skb_header_cloned(skb) &&
3698                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3699                         dev_kfree_skb(skb);
3700                         goto out_unlock;
3701                 }
3702
3703                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3704                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3705
3706                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3707                                TXD_FLAG_CPU_POST_DMA);
3708
3709                 skb->nh.iph->check = 0;
3710                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3711
3712                 skb->h.th->check = 0;
3713
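                /* For the HW_TSO_2 engines served by this routine the
                 * combined IP + TCP header length, shifted left by 9,
                 * is folded into the mss value that tg3_set_txd()
                 * packs into the descriptor.
                 */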
3714                 mss |= (ip_tcp_len + tcp_opt_len) << 9;
3715         }
3716         else if (skb->ip_summed == CHECKSUM_HW)
3717                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3718 #else
3719         mss = 0;
3720         if (skb->ip_summed == CHECKSUM_HW)
3721                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3722 #endif
3723 #if TG3_VLAN_TAG_USED
3724         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3725                 base_flags |= (TXD_FLAG_VLAN |
3726                                (vlan_tx_tag_get(skb) << 16));
3727 #endif
3728
3729         /* Queue skb data, a.k.a. the main skb fragment. */
3730         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3731
3732         tp->tx_buffers[entry].skb = skb;
3733         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3734
3735         tg3_set_txd(tp, entry, mapping, len, base_flags,
3736                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3737
3738         entry = NEXT_TX(entry);
3739
3740         /* Now loop through additional data fragments, and queue them. */
3741         if (skb_shinfo(skb)->nr_frags > 0) {
3742                 unsigned int i, last;
3743
3744                 last = skb_shinfo(skb)->nr_frags - 1;
3745                 for (i = 0; i <= last; i++) {
3746                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3747
3748                         len = frag->size;
3749                         mapping = pci_map_page(tp->pdev,
3750                                                frag->page,
3751                                                frag->page_offset,
3752                                                len, PCI_DMA_TODEVICE);
3753
3754                         tp->tx_buffers[entry].skb = NULL;
3755                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3756
3757                         tg3_set_txd(tp, entry, mapping, len,
3758                                     base_flags, (i == last) | (mss << 1));
3759
3760                         entry = NEXT_TX(entry);
3761                 }
3762         }
3763
3764         /* Packets are ready, update Tx producer idx local and on card. */
3765         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3766
3767         tp->tx_prod = entry;
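        /* Stop the queue once a worst-case skb (MAX_SKB_FRAGS + 1
         * descriptors) no longer fits; the immediate re-check against
         * TG3_TX_WAKEUP_THRESH catches descriptors that tg3_tx() may
         * have reclaimed in the meantime, avoiding a lost wakeup.
         */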
3768         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3769                 netif_stop_queue(dev);
3770                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3771                         netif_wake_queue(tp->dev);
3772         }
3773
3774 out_unlock:
3775         mmiowb();
3776         spin_unlock(&tp->tx_lock);
3777
3778         dev->trans_start = jiffies;
3779
3780         return NETDEV_TX_OK;
3781 }
3782
3783 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3784  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3785  */
3786 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3787 {
3788         struct tg3 *tp = netdev_priv(dev);
3789         dma_addr_t mapping;
3790         u32 len, entry, base_flags, mss;
3791         int would_hit_hwbug;
3792
3793         len = skb_headlen(skb);
3794
3795         /* No BH disabling for tx_lock here.  We are running in BH disabled
3796          * context and TX reclaim runs via tp->poll inside of a software
3797          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3798          * no IRQ context deadlocks to worry about either.  Rejoice!
3799          */
3800         if (!spin_trylock(&tp->tx_lock))
3801                 return NETDEV_TX_LOCKED; 
3802
3803         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3804                 if (!netif_queue_stopped(dev)) {
3805                         netif_stop_queue(dev);
3806
3807                         /* This is a hard error, log it. */
3808                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3809                                "queue awake!\n", dev->name);
3810                 }
3811                 spin_unlock(&tp->tx_lock);
3812                 return NETDEV_TX_BUSY;
3813         }
3814
3815         entry = tp->tx_prod;
3816         base_flags = 0;
3817         if (skb->ip_summed == CHECKSUM_HW)
3818                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3819 #if TG3_TSO_SUPPORT != 0
3820         mss = 0;
3821         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3822             (mss = skb_shinfo(skb)->tso_size) != 0) {
3823                 int tcp_opt_len, ip_tcp_len;
3824
3825                 if (skb_header_cloned(skb) &&
3826                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3827                         dev_kfree_skb(skb);
3828                         goto out_unlock;
3829                 }
3830
3831                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3832                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3833
3834                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3835                                TXD_FLAG_CPU_POST_DMA);
3836
3837                 skb->nh.iph->check = 0;
3838                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3839                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3840                         skb->h.th->check = 0;
3841                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3842                 }
3843                 else {
3844                         skb->h.th->check =
3845                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3846                                                    skb->nh.iph->daddr,
3847                                                    0, IPPROTO_TCP, 0);
3848                 }
3849
3850                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3851                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3852                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3853                                 int tsflags;
3854
3855                                 tsflags = ((skb->nh.iph->ihl - 5) +
3856                                            (tcp_opt_len >> 2));
3857                                 mss |= (tsflags << 11);
3858                         }
3859                 } else {
3860                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3861                                 int tsflags;
3862
3863                                 tsflags = ((skb->nh.iph->ihl - 5) +
3864                                            (tcp_opt_len >> 2));
3865                                 base_flags |= tsflags << 12;
3866                         }
3867                 }
3868         }
3869 #else
3870         mss = 0;
3871 #endif
3872 #if TG3_VLAN_TAG_USED
3873         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3874                 base_flags |= (TXD_FLAG_VLAN |
3875                                (vlan_tx_tag_get(skb) << 16));
3876 #endif
3877
3878         /* Queue skb data, a.k.a. the main skb fragment. */
3879         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3880
3881         tp->tx_buffers[entry].skb = skb;
3882         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3883
3884         would_hit_hwbug = 0;
3885
3886         if (tg3_4g_overflow_test(mapping, len))
3887                 would_hit_hwbug = 1;
3888
3889         tg3_set_txd(tp, entry, mapping, len, base_flags,
3890                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3891
3892         entry = NEXT_TX(entry);
3893
3894         /* Now loop through additional data fragments, and queue them. */
3895         if (skb_shinfo(skb)->nr_frags > 0) {
3896                 unsigned int i, last;
3897
3898                 last = skb_shinfo(skb)->nr_frags - 1;
3899                 for (i = 0; i <= last; i++) {
3900                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3901
3902                         len = frag->size;
3903                         mapping = pci_map_page(tp->pdev,
3904                                                frag->page,
3905                                                frag->page_offset,
3906                                                len, PCI_DMA_TODEVICE);
3907
3908                         tp->tx_buffers[entry].skb = NULL;
3909                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3910
3911                         if (tg3_4g_overflow_test(mapping, len))
3912                                 would_hit_hwbug = 1;
3913
3914                         if (tg3_40bit_overflow_test(tp, mapping, len))
3915                                 would_hit_hwbug = 1;
3916
3917                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3918                                 tg3_set_txd(tp, entry, mapping, len,
3919                                             base_flags, (i == last)|(mss << 1));
3920                         else
3921                                 tg3_set_txd(tp, entry, mapping, len,
3922                                             base_flags, (i == last));
3923
3924                         entry = NEXT_TX(entry);
3925                 }
3926         }
3927
3928         if (would_hit_hwbug) {
3929                 u32 last_plus_one = entry;
3930                 u32 start;
3931
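                /* Rewind to the slot holding the skb head: entry was
                 * advanced once for the linear data and once per
                 * fragment, so stepping back nr_frags + 1 (masked to
                 * the ring size) recovers the first descriptor of the
                 * chain to be replaced.
                 */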
3932                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3933                 start &= (TG3_TX_RING_SIZE - 1);
3934
3935                 /* If the workaround fails due to memory/mapping
3936                  * failure, silently drop this packet.
3937                  */
3938                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
3939                                                 &start, base_flags, mss))
3940                         goto out_unlock;
3941
3942                 entry = start;
3943         }
3944
3945         /* Packets are ready, update Tx producer idx local and on card. */
3946         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3947
3948         tp->tx_prod = entry;
3949         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3950                 netif_stop_queue(dev);
3951                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3952                         netif_wake_queue(tp->dev);
3953         }
3954
3955 out_unlock:
3956         mmiowb();
3957         spin_unlock(&tp->tx_lock);
3958
3959         dev->trans_start = jiffies;
3960
3961         return NETDEV_TX_OK;
3962 }
3963
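/* Note the split in tg3_set_mtu() below: 5780-class chips do not use
 * the dedicated jumbo ring, so a jumbo MTU instead enlarges the
 * standard ring buffers (see tg3_init_rings()) and TSO is switched
 * off; other jumbo-capable chips simply enable the jumbo ring.
 */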
3964 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3965                                int new_mtu)
3966 {
3967         dev->mtu = new_mtu;
3968
3969         if (new_mtu > ETH_DATA_LEN) {
3970                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
3971                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3972                         ethtool_op_set_tso(dev, 0);
3973                 }
3974                 else
3975                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3976         } else {
3977                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3978                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
3979                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
3980         }
3981 }
3982
3983 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3984 {
3985         struct tg3 *tp = netdev_priv(dev);
3986
3987         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3988                 return -EINVAL;
3989
3990         if (!netif_running(dev)) {
3991                 /* We'll just catch it later when the
3992                  * device is brought up.
3993                  */
3994                 tg3_set_mtu(dev, tp, new_mtu);
3995                 return 0;
3996         }
3997
3998         tg3_netif_stop(tp);
3999
4000         tg3_full_lock(tp, 1);
4001
4002         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4003
4004         tg3_set_mtu(dev, tp, new_mtu);
4005
4006         tg3_init_hw(tp);
4007
4008         tg3_netif_start(tp);
4009
4010         tg3_full_unlock(tp);
4011
4012         return 0;
4013 }
4014
4015 /* Free up pending packets in all rx/tx rings.
4016  *
4017  * The chip has been shut down and the driver detached from
4018  * the networking, so no interrupts or new tx packets will
4019  * end up in the driver.  tp->{tx,}lock is not held and we are not
4020  * in an interrupt context and thus may sleep.
4021  */
4022 static void tg3_free_rings(struct tg3 *tp)
4023 {
4024         struct ring_info *rxp;
4025         int i;
4026
4027         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4028                 rxp = &tp->rx_std_buffers[i];
4029
4030                 if (rxp->skb == NULL)
4031                         continue;
4032                 pci_unmap_single(tp->pdev,
4033                                  pci_unmap_addr(rxp, mapping),
4034                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4035                                  PCI_DMA_FROMDEVICE);
4036                 dev_kfree_skb_any(rxp->skb);
4037                 rxp->skb = NULL;
4038         }
4039
4040         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4041                 rxp = &tp->rx_jumbo_buffers[i];
4042
4043                 if (rxp->skb == NULL)
4044                         continue;
4045                 pci_unmap_single(tp->pdev,
4046                                  pci_unmap_addr(rxp, mapping),
4047                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4048                                  PCI_DMA_FROMDEVICE);
4049                 dev_kfree_skb_any(rxp->skb);
4050                 rxp->skb = NULL;
4051         }
4052
4053         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4054                 struct tx_ring_info *txp;
4055                 struct sk_buff *skb;
4056                 int j;
4057
4058                 txp = &tp->tx_buffers[i];
4059                 skb = txp->skb;
4060
4061                 if (skb == NULL) {
4062                         i++;
4063                         continue;
4064                 }
4065
4066                 pci_unmap_single(tp->pdev,
4067                                  pci_unmap_addr(txp, mapping),
4068                                  skb_headlen(skb),
4069                                  PCI_DMA_TODEVICE);
4070                 txp->skb = NULL;
4071
4072                 i++;
4073
4074                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4075                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4076                         pci_unmap_page(tp->pdev,
4077                                        pci_unmap_addr(txp, mapping),
4078                                        skb_shinfo(skb)->frags[j].size,
4079                                        PCI_DMA_TODEVICE);
4080                         i++;
4081                 }
4082
4083                 dev_kfree_skb_any(skb);
4084         }
4085 }
4086
4087 /* Initialize tx/rx rings for packet processing.
4088  *
4089  * The chip has been shut down and the driver detached from
4090  * the networking core, so no interrupts or new tx packets will
4091  * end up in the driver.  tp->{tx,}lock are held and thus
4092  * we may not sleep.
4093  */
4094 static void tg3_init_rings(struct tg3 *tp)
4095 {
4096         u32 i;
4097
4098         /* Free up all the SKBs. */
4099         tg3_free_rings(tp);
4100
4101         /* Zero out all descriptors. */
4102         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4103         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4104         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4105         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4106
4107         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4108         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4109             (tp->dev->mtu > ETH_DATA_LEN))
4110                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4111
4112         /* Initialize invariants of the rings; we only set this
4113          * stuff once.  This works because the card does not
4114          * write into the rx buffer posting rings.
4115          */
4116         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4117                 struct tg3_rx_buffer_desc *rxd;
4118
4119                 rxd = &tp->rx_std[i];
4120                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4121                         << RXD_LEN_SHIFT;
4122                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4123                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4124                                (i << RXD_OPAQUE_INDEX_SHIFT));
4125         }
4126
4127         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4128                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4129                         struct tg3_rx_buffer_desc *rxd;
4130
4131                         rxd = &tp->rx_jumbo[i];
4132                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4133                                 << RXD_LEN_SHIFT;
4134                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4135                                 RXD_FLAG_JUMBO;
4136                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4137                                (i << RXD_OPAQUE_INDEX_SHIFT));
4138                 }
4139         }
4140
4141         /* Now allocate fresh SKBs for each rx ring. */
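        /* An allocation failure here is not fatal; the loop simply stops
         * and the ring operates with fewer receive buffers posted.
         */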
4142         for (i = 0; i < tp->rx_pending; i++) {
4143                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
4144                                      -1, i) < 0)
4145                         break;
4146         }
4147
4148         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4149                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4150                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4151                                              -1, i) < 0)
4152                                 break;
4153                 }
4154         }
4155 }
4156
4157 /*
4158  * Must not be invoked with interrupts disabled; the hardware
4159  * should already be shut down.
4160  */
4161 static void tg3_free_consistent(struct tg3 *tp)
4162 {
4163         kfree(tp->rx_std_buffers);
4164         tp->rx_std_buffers = NULL;
4165         if (tp->rx_std) {
4166                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4167                                     tp->rx_std, tp->rx_std_mapping);
4168                 tp->rx_std = NULL;
4169         }
4170         if (tp->rx_jumbo) {
4171                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4172                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4173                 tp->rx_jumbo = NULL;
4174         }
4175         if (tp->rx_rcb) {
4176                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4177                                     tp->rx_rcb, tp->rx_rcb_mapping);
4178                 tp->rx_rcb = NULL;
4179         }
4180         if (tp->tx_ring) {
4181                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4182                         tp->tx_ring, tp->tx_desc_mapping);
4183                 tp->tx_ring = NULL;
4184         }
4185         if (tp->hw_status) {
4186                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4187                                     tp->hw_status, tp->status_mapping);
4188                 tp->hw_status = NULL;
4189         }
4190         if (tp->hw_stats) {
4191                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4192                                     tp->hw_stats, tp->stats_mapping);
4193                 tp->hw_stats = NULL;
4194         }
4195 }
4196
4197 /*
4198  * Must not be invoked with interrupts disabled; the hardware
4199  * should already be shut down.  Can sleep.
4200  */
4201 static int tg3_alloc_consistent(struct tg3 *tp)
4202 {
4203         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4204                                       (TG3_RX_RING_SIZE +
4205                                        TG3_RX_JUMBO_RING_SIZE)) +
4206                                      (sizeof(struct tx_ring_info) *
4207                                       TG3_TX_RING_SIZE),
4208                                      GFP_KERNEL);
4209         if (!tp->rx_std_buffers)
4210                 return -ENOMEM;
4211
4212         memset(tp->rx_std_buffers, 0,
4213                (sizeof(struct ring_info) *
4214                 (TG3_RX_RING_SIZE +
4215                  TG3_RX_JUMBO_RING_SIZE)) +
4216                (sizeof(struct tx_ring_info) *
4217                 TG3_TX_RING_SIZE));
4218
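        /* The single allocation above backs all three bookkeeping arrays;
         * carve it into the standard RX, jumbo RX and TX ring_info blocks.
         */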
4219         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4220         tp->tx_buffers = (struct tx_ring_info *)
4221                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4222
4223         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4224                                           &tp->rx_std_mapping);
4225         if (!tp->rx_std)
4226                 goto err_out;
4227
4228         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4229                                             &tp->rx_jumbo_mapping);
4230
4231         if (!tp->rx_jumbo)
4232                 goto err_out;
4233
4234         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4235                                           &tp->rx_rcb_mapping);
4236         if (!tp->rx_rcb)
4237                 goto err_out;
4238
4239         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4240                                            &tp->tx_desc_mapping);
4241         if (!tp->tx_ring)
4242                 goto err_out;
4243
4244         tp->hw_status = pci_alloc_consistent(tp->pdev,
4245                                              TG3_HW_STATUS_SIZE,
4246                                              &tp->status_mapping);
4247         if (!tp->hw_status)
4248                 goto err_out;
4249
4250         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4251                                             sizeof(struct tg3_hw_stats),
4252                                             &tp->stats_mapping);
4253         if (!tp->hw_stats)
4254                 goto err_out;
4255
4256         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4257         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4258
4259         return 0;
4260
4261 err_out:
4262         tg3_free_consistent(tp);
4263         return -ENOMEM;
4264 }
4265
4266 #define MAX_WAIT_CNT 1000
4267
4268 /* To stop a block, clear the enable bit and poll till it
4269  * clears.  tp->lock is held.
4270  */
4271 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4272 {
4273         unsigned int i;
4274         u32 val;
4275
4276         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4277                 switch (ofs) {
4278                 case RCVLSC_MODE:
4279                 case DMAC_MODE:
4280                 case MBFREE_MODE:
4281                 case BUFMGR_MODE:
4282                 case MEMARB_MODE:
4283                         /* We can't enable/disable these bits of the
4284                          * 5705/5750; just say success.
4285                          */
4286                         return 0;
4287
4288                 default:
4289                         break;
4290                 }
4291         }
4292
4293         val = tr32(ofs);
4294         val &= ~enable_bit;
4295         tw32_f(ofs, val);
4296
4297         for (i = 0; i < MAX_WAIT_CNT; i++) {
4298                 udelay(100);
4299                 val = tr32(ofs);
4300                 if ((val & enable_bit) == 0)
4301                         break;
4302         }
4303
4304         if (i == MAX_WAIT_CNT && !silent) {
4305                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4306                        "ofs=%lx enable_bit=%x\n",
4307                        ofs, enable_bit);
4308                 return -ENODEV;
4309         }
4310
4311         return 0;
4312 }
4313
4314 /* tp->lock is held. */
4315 static int tg3_abort_hw(struct tg3 *tp, int silent)
4316 {
4317         int i, err;
4318
4319         tg3_disable_ints(tp);
4320
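        /* Quiesce the chip front to back: the MAC receiver first, then
         * the receive and send state machines, the MAC transmitter, and
         * finally host coalescing, DMA and the buffer manager/arbiter.
         */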
4321         tp->rx_mode &= ~RX_MODE_ENABLE;
4322         tw32_f(MAC_RX_MODE, tp->rx_mode);
4323         udelay(10);
4324
4325         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4326         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4327         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4328         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4329         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4330         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4331
4332         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4333         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4334         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4335         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4336         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4337         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4338         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4339
4340         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4341         tw32_f(MAC_MODE, tp->mac_mode);
4342         udelay(40);
4343
4344         tp->tx_mode &= ~TX_MODE_ENABLE;
4345         tw32_f(MAC_TX_MODE, tp->tx_mode);
4346
4347         for (i = 0; i < MAX_WAIT_CNT; i++) {
4348                 udelay(100);
4349                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4350                         break;
4351         }
4352         if (i >= MAX_WAIT_CNT) {
4353                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4354                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4355                        tp->dev->name, tr32(MAC_TX_MODE));
4356                 err |= -ENODEV;
4357         }
4358
4359         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4360         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4361         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4362
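        /* Pulse the FTQ reset register to flush the chip's internal work
         * queues before the buffer manager and memory arbiter are stopped
         * below.
         */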
4363         tw32(FTQ_RESET, 0xffffffff);
4364         tw32(FTQ_RESET, 0x00000000);
4365
4366         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4367         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4368
4369         if (tp->hw_status)
4370                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4371         if (tp->hw_stats)
4372                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4373
4374         return err;
4375 }
4376
4377 /* tp->lock is held. */
4378 static int tg3_nvram_lock(struct tg3 *tp)
4379 {
4380         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4381                 int i;
4382
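                /* The SWARB request is reference counted: only the first
                 * tg3_nvram_lock() actually requests arbitration and only
                 * the final tg3_nvram_unlock() drops it.
                 */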
4383                 if (tp->nvram_lock_cnt == 0) {
4384                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4385                         for (i = 0; i < 8000; i++) {
4386                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4387                                         break;
4388                                 udelay(20);
4389                         }
4390                         if (i == 8000) {
4391                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4392                                 return -ENODEV;
4393                         }
4394                 }
4395                 tp->nvram_lock_cnt++;
4396         }
4397         return 0;
4398 }
4399
4400 /* tp->lock is held. */
4401 static void tg3_nvram_unlock(struct tg3 *tp)
4402 {
4403         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4404                 if (tp->nvram_lock_cnt > 0)
4405                         tp->nvram_lock_cnt--;
4406                 if (tp->nvram_lock_cnt == 0)
4407                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4408         }
4409 }
4410
4411 /* tp->lock is held. */
4412 static void tg3_enable_nvram_access(struct tg3 *tp)
4413 {
4414         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4415             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4416                 u32 nvaccess = tr32(NVRAM_ACCESS);
4417
4418                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4419         }
4420 }
4421
4422 /* tp->lock is held. */
4423 static void tg3_disable_nvram_access(struct tg3 *tp)
4424 {
4425         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4426             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4427                 u32 nvaccess = tr32(NVRAM_ACCESS);
4428
4429                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4430         }
4431 }
4432
4433 /* tp->lock is held. */
4434 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4435 {
4436         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4437                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4438                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4439
4440         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4441                 switch (kind) {
4442                 case RESET_KIND_INIT:
4443                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4444                                       DRV_STATE_START);
4445                         break;
4446
4447                 case RESET_KIND_SHUTDOWN:
4448                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4449                                       DRV_STATE_UNLOAD);
4450                         break;
4451
4452                 case RESET_KIND_SUSPEND:
4453                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4454                                       DRV_STATE_SUSPEND);
4455                         break;
4456
4457                 default:
4458                         break;
4459                 }
4460         }
4461 }
4462
4463 /* tp->lock is held. */
4464 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4465 {
4466         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4467                 switch (kind) {
4468                 case RESET_KIND_INIT:
4469                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4470                                       DRV_STATE_START_DONE);
4471                         break;
4472
4473                 case RESET_KIND_SHUTDOWN:
4474                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4475                                       DRV_STATE_UNLOAD_DONE);
4476                         break;
4477
4478                 default:
4479                         break;
4480                 }
4481         }
4482 }
4483
4484 /* tp->lock is held. */
4485 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4486 {
4487         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4488                 switch (kind) {
4489                 case RESET_KIND_INIT:
4490                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4491                                       DRV_STATE_START);
4492                         break;
4493
4494                 case RESET_KIND_SHUTDOWN:
4495                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4496                                       DRV_STATE_UNLOAD);
4497                         break;
4498
4499                 case RESET_KIND_SUSPEND:
4500                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4501                                       DRV_STATE_SUSPEND);
4502                         break;
4503
4504                 default:
4505                         break;
4506                 }
4507         }
4508 }
4509
4510 static void tg3_stop_fw(struct tg3 *);
4511
4512 /* tp->lock is held. */
4513 static int tg3_chip_reset(struct tg3 *tp)
4514 {
4515         u32 val;
4516         void (*write_op)(struct tg3 *, u32, u32);
4517         int i;
4518
4519         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4520                 tg3_nvram_lock(tp);
4521                 /* No matching tg3_nvram_unlock() after this because
4522                  * chip reset below will undo the nvram lock.
4523                  */
4524                 tp->nvram_lock_cnt = 0;
4525         }
4526
4527         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4528             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4529                 tw32(GRC_FASTBOOT_PC, 0);
4530
4531         /*
4532          * We must avoid the readl() that normally takes place.
4533          * It locks machines, causes machine checks, and other
4534          * fun things.  So, temporarily disable the 5701
4535          * hardware workaround, while we do the reset.
4536          */
4537         write_op = tp->write32;
4538         if (write_op == tg3_write_flush_reg32)
4539                 tp->write32 = tg3_write32;
4540
4541         /* do the reset */
4542         val = GRC_MISC_CFG_CORECLK_RESET;
4543
4544         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4545                 if (tr32(0x7e2c) == 0x60) {
4546                         tw32(0x7e2c, 0x20);
4547                 }
4548                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4549                         tw32(GRC_MISC_CFG, (1 << 29));
4550                         val |= (1 << 29);
4551                 }
4552         }
4553
4554         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4555                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4556         tw32(GRC_MISC_CFG, val);
4557
4558         /* restore 5701 hardware bug workaround write method */
4559         tp->write32 = write_op;
4560
4561         /* Unfortunately, we have to delay before the PCI read back.
4562          * Some 575X chips will not even respond to a PCI cfg access
4563          * when the reset command is given to the chip.
4564          *
4565          * How do these hardware designers expect things to work
4566          * properly if the PCI write is posted for a long period
4567          * of time?  It is always necessary to have some method by
4568          * which a register read back can occur to push out the
4569          * posted write that performs the reset.
4570          *
4571          * For most tg3 variants the trick below has worked.
4572          * Ho hum...
4573          */
4574         udelay(120);
4575
4576         /* Flush PCI posted writes.  The normal MMIO registers
4577          * are inaccessible at this time, so this is the only
4578          * way to do this reliably (actually, this is no longer
4579          * the case, see above).  I tried to use indirect
4580          * register read/write but this upset some 5701 variants.
4581          */
4582         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4583
4584         udelay(120);
4585
4586         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4587                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4588                         int i;
4589                         u32 cfg_val;
4590
4591                         /* Wait for link training to complete.  */
4592                         for (i = 0; i < 5000; i++)
4593                                 udelay(100);
4594
4595                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4596                         pci_write_config_dword(tp->pdev, 0xc4,
4597                                                cfg_val | (1 << 15));
4598                 }
4599                 /* Set PCIE max payload size and clear error status.  */
4600                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4601         }
4602
4603         /* Re-enable indirect register accesses. */
4604         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4605                                tp->misc_host_ctrl);
4606
4607         /* Set MAX PCI retry to zero. */
4608         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4609         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4610             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4611                 val |= PCISTATE_RETRY_SAME_DMA;
4612         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4613
4614         pci_restore_state(tp->pdev);
4615
4616         /* Make sure PCI-X relaxed ordering bit is clear. */
4617         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4618         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4619         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4620
4621         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4622                 u32 val;
4623
4624                 /* Chip reset on 5780 will reset the MSI enable bit,
4625                  * so we need to restore it.
4626                  */
4627                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4628                         u16 ctrl;
4629
4630                         pci_read_config_word(tp->pdev,
4631                                              tp->msi_cap + PCI_MSI_FLAGS,
4632                                              &ctrl);
4633                         pci_write_config_word(tp->pdev,
4634                                               tp->msi_cap + PCI_MSI_FLAGS,
4635                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4636                         val = tr32(MSGINT_MODE);
4637                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4638                 }
4639
4640                 val = tr32(MEMARB_MODE);
4641                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4642
4643         } else
4644                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4645
4646         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4647                 tg3_stop_fw(tp);
4648                 tw32(0x5000, 0x400);
4649         }
4650
4651         tw32(GRC_MODE, tp->grc_mode);
4652
4653         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4654                 u32 val = tr32(0xc4);
4655
4656                 tw32(0xc4, val | (1 << 15));
4657         }
4658
4659         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4660             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4661                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4662                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4663                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4664                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4665         }
4666
4667         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4668                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4669                 tw32_f(MAC_MODE, tp->mac_mode);
4670         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4671                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4672                 tw32_f(MAC_MODE, tp->mac_mode);
4673         } else
4674                 tw32_f(MAC_MODE, 0);
4675         udelay(40);
4676
4677         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4678                 /* Wait for firmware initialization to complete. */
4679                 for (i = 0; i < 100000; i++) {
4680                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4681                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4682                                 break;
4683                         udelay(10);
4684                 }
4685                 if (i >= 100000) {
4686                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4687                                "firmware will not restart magic=%08x\n",
4688                                tp->dev->name, val);
4689                         return -ENODEV;
4690                 }
4691         }
4692
4693         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4694             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4695                 u32 val = tr32(0x7c00);
4696
4697                 tw32(0x7c00, val | (1 << 25));
4698         }
4699
4700         /* Reprobe ASF enable state.  */
4701         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4702         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4703         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4704         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4705                 u32 nic_cfg;
4706
4707                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4708                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4709                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4710                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4711                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4712                 }
4713         }
4714
4715         return 0;
4716 }
4717
4718 /* tp->lock is held. */
4719 static void tg3_stop_fw(struct tg3 *tp)
4720 {
4721         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4722                 u32 val;
4723                 int i;
4724
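                /* Ask the ASF firmware to pause: post the command in the
                 * mailbox, ring the RX CPU event bit, then wait for the
                 * firmware to clear that bit as its acknowledgement.
                 */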
4725                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4726                 val = tr32(GRC_RX_CPU_EVENT);
4727                 val |= (1 << 14);
4728                 tw32(GRC_RX_CPU_EVENT, val);
4729
4730                 /* Wait for RX cpu to ACK the event.  */
4731                 for (i = 0; i < 100; i++) {
4732                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4733                                 break;
4734                         udelay(1);
4735                 }
4736         }
4737 }
4738
4739 /* tp->lock is held. */
4740 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4741 {
4742         int err;
4743
4744         tg3_stop_fw(tp);
4745
4746         tg3_write_sig_pre_reset(tp, kind);
4747
4748         tg3_abort_hw(tp, silent);
4749         err = tg3_chip_reset(tp);
4750
4751         tg3_write_sig_legacy(tp, kind);
4752         tg3_write_sig_post_reset(tp, kind);
4753
4754         if (err)
4755                 return err;
4756
4757         return 0;
4758 }
4759
4760 #define TG3_FW_RELEASE_MAJOR    0x0
4761 #define TG3_FW_RELEASE_MINOR    0x0
4762 #define TG3_FW_RELEASE_FIX      0x0
4763 #define TG3_FW_START_ADDR       0x08000000
4764 #define TG3_FW_TEXT_ADDR        0x08000000
4765 #define TG3_FW_TEXT_LEN         0x9c0
4766 #define TG3_FW_RODATA_ADDR      0x080009c0
4767 #define TG3_FW_RODATA_LEN       0x60
4768 #define TG3_FW_DATA_ADDR        0x08000a40
4769 #define TG3_FW_DATA_LEN         0x20
4770 #define TG3_FW_SBSS_ADDR        0x08000a60
4771 #define TG3_FW_SBSS_LEN         0xc
4772 #define TG3_FW_BSS_ADDR         0x08000a70
4773 #define TG3_FW_BSS_LEN          0x10
4774
4775 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4776         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4777         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4778         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4779         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4780         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4781         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4782         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4783         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4784         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4785         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4786         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4787         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4788         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4789         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4790         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4791         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4792         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4793         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4794         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4795         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4796         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4797         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4798         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4799         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4800         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4801         0, 0, 0, 0, 0, 0,
4802         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4803         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4804         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4805         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4806         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4807         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4808         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4809         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4810         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4811         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4812         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4813         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4814         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4815         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4816         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4817         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4818         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4819         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4820         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4821         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4822         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4823         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4824         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4825         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4826         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4827         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4828         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4829         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4830         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4831         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4832         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4833         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4834         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4835         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4836         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4837         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4838         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4839         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4840         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4841         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4842         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4843         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4844         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4845         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4846         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4847         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4848         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4849         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4850         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4851         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4852         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4853         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4854         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4855         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4856         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4857         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4858         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4859         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4860         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4861         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4862         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4863         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4864         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4865         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4866         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4867 };
4868
4869 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4870         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4871         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4872         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4873         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4874         0x00000000
4875 };
4876
4877 #if 0 /* All zeros, don't eat up space with it. */
4878 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4879         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4880         0x00000000, 0x00000000, 0x00000000, 0x00000000
4881 };
4882 #endif
4883
4884 #define RX_CPU_SCRATCH_BASE     0x30000
4885 #define RX_CPU_SCRATCH_SIZE     0x04000
4886 #define TX_CPU_SCRATCH_BASE     0x34000
4887 #define TX_CPU_SCRATCH_SIZE     0x04000
4888
4889 /* tp->lock is held. */
4890 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4891 {
4892         int i;
4893
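        /* 5705 and newer chips have a single embedded RISC CPU; asking to
         * halt a (nonexistent) TX CPU indicates a driver bug.
         */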
4894         if (offset == TX_CPU_BASE &&
4895             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4896                 BUG();
4897
4898         if (offset == RX_CPU_BASE) {
4899                 for (i = 0; i < 10000; i++) {
4900                         tw32(offset + CPU_STATE, 0xffffffff);
4901                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4902                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4903                                 break;
4904                 }
4905
4906                 tw32(offset + CPU_STATE, 0xffffffff);
4907                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4908                 udelay(10);
4909         } else {
4910                 for (i = 0; i < 10000; i++) {
4911                         tw32(offset + CPU_STATE, 0xffffffff);
4912                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4913                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4914                                 break;
4915                 }
4916         }
4917
4918         if (i >= 10000) {
4919                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4920                        "and %s CPU\n",
4921                        tp->dev->name,
4922                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4923                 return -ENODEV;
4924         }
4925
4926         /* Clear firmware's nvram arbitration. */
4927         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4928                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
4929         return 0;
4930 }
4931
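/* Describes the loadable sections of an on-chip MIPS firmware image.
 * The *_base addresses are in the firmware's own address space; only
 * their low 16 bits are used as offsets into the CPU scratch memory
 * when the image is loaded below.
 */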
4932 struct fw_info {
4933         unsigned int text_base;
4934         unsigned int text_len;
4935         u32 *text_data;
4936         unsigned int rodata_base;
4937         unsigned int rodata_len;
4938         u32 *rodata_data;
4939         unsigned int data_base;
4940         unsigned int data_len;
4941         u32 *data_data;
4942 };
4943
4944 /* tp->lock is held. */
4945 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4946                                  int cpu_scratch_size, struct fw_info *info)
4947 {
4948         int err, lock_err, i;
4949         void (*write_op)(struct tg3 *, u32, u32);
4950
4951         if (cpu_base == TX_CPU_BASE &&
4952             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4953                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4954                        "TX cpu firmware on %s which is 5705.\n",
4955                        tp->dev->name);
4956                 return -EINVAL;
4957         }
4958
4959         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4960                 write_op = tg3_write_mem;
4961         else
4962                 write_op = tg3_write_indirect_reg32;
4963
4964         /* It is possible that bootcode is still loading at this point.
4965          * Get the nvram lock before halting the cpu.
4966          */
4967         lock_err = tg3_nvram_lock(tp);
4968         err = tg3_halt_cpu(tp, cpu_base);
4969         if (!lock_err)
4970                 tg3_nvram_unlock(tp);
4971         if (err)
4972                 goto out;
4973
4974         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4975                 write_op(tp, cpu_scratch_base + i, 0);
4976         tw32(cpu_base + CPU_STATE, 0xffffffff);
4977         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4978         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4979                 write_op(tp, (cpu_scratch_base +
4980                               (info->text_base & 0xffff) +
4981                               (i * sizeof(u32))),
4982                          (info->text_data ?
4983                           info->text_data[i] : 0));
4984         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4985                 write_op(tp, (cpu_scratch_base +
4986                               (info->rodata_base & 0xffff) +
4987                               (i * sizeof(u32))),
4988                          (info->rodata_data ?
4989                           info->rodata_data[i] : 0));
4990         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4991                 write_op(tp, (cpu_scratch_base +
4992                               (info->data_base & 0xffff) +
4993                               (i * sizeof(u32))),
4994                          (info->data_data ?
4995                           info->data_data[i] : 0));
4996
4997         err = 0;
4998
4999 out:
5000         return err;
5001 }
5002
5003 /* tp->lock is held. */
5004 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5005 {
5006         struct fw_info info;
5007         int err, i;
5008
5009         info.text_base = TG3_FW_TEXT_ADDR;
5010         info.text_len = TG3_FW_TEXT_LEN;
5011         info.text_data = &tg3FwText[0];
5012         info.rodata_base = TG3_FW_RODATA_ADDR;
5013         info.rodata_len = TG3_FW_RODATA_LEN;
5014         info.rodata_data = &tg3FwRodata[0];
5015         info.data_base = TG3_FW_DATA_ADDR;
5016         info.data_len = TG3_FW_DATA_LEN;
5017         info.data_data = NULL;
5018
5019         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5020                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5021                                     &info);
5022         if (err)
5023                 return err;
5024
5025         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5026                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5027                                     &info);
5028         if (err)
5029                 return err;
5030
5031         /* Now start up only the RX cpu. */
5032         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5033         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5034
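        /* Make sure the program counter actually latched at the firmware
         * entry point, re-halting the CPU and rewriting the PC a few
         * times if necessary, before releasing it below.
         */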
5035         for (i = 0; i < 5; i++) {
5036                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5037                         break;
5038                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5039                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5040                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5041                 udelay(1000);
5042         }
5043         if (i >= 5) {
5044                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5045                        "to set RX CPU PC, is %08x should be %08x\n",
5046                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5047                        TG3_FW_TEXT_ADDR);
5048                 return -ENODEV;
5049         }
5050         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5051         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5052
5053         return 0;
5054 }
5055
5056 #if TG3_TSO_SUPPORT != 0
5057
5058 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5059 #define TG3_TSO_FW_RELEASE_MINOR        0x6
5060 #define TG3_TSO_FW_RELEASE_FIX          0x0
5061 #define TG3_TSO_FW_START_ADDR           0x08000000
5062 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5063 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5064 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5065 #define TG3_TSO_FW_RODATA_LEN           0x60
5066 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5067 #define TG3_TSO_FW_DATA_LEN             0x30
5068 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5069 #define TG3_TSO_FW_SBSS_LEN             0x2c
5070 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5071 #define TG3_TSO_FW_BSS_LEN              0x894
5072
5073 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5074         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5075         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5076         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5077         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5078         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5079         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5080         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5081         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5082         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5083         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5084         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5085         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5086         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5087         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5088         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5089         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5090         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5091         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5092         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5093         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5094         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5095         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5096         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5097         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5098         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5099         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5100         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5101         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5102         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5103         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5104         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5105         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5106         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5107         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5108         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5109         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5110         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5111         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5112         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5113         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5114         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5115         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5116         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5117         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5118         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5119         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5120         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5121         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5122         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5123         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5124         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5125         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5126         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5127         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5128         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5129         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5130         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5131         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5132         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5133         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5134         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5135         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5136         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5137         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5138         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5139         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5140         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5141         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5142         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5143         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5144         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5145         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5146         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5147         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5148         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5149         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5150         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5151         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5152         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5153         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5154         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5155         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5156         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5157         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5158         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5159         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5160         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5161         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5162         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5163         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5164         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5165         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5166         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5167         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5168         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5169         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5170         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5171         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5172         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5173         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5174         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5175         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5176         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5177         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5178         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5179         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5180         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5181         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5182         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5183         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5184         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5185         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5186         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5187         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5188         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5189         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5190         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5191         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5192         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5193         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5194         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5195         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5196         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5197         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5198         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5199         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5200         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5201         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5202         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5203         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5204         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5205         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5206         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5207         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5208         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5209         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5210         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5211         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5212         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5213         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5214         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5215         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5216         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5217         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5218         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5219         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5220         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5221         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5222         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5223         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5224         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5225         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5226         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5227         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5228         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5229         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5230         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5231         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5232         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5233         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5234         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5235         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5236         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5237         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5238         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5239         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5240         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5241         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5242         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5243         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5244         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5245         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5246         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5247         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5248         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5249         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5250         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5251         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5252         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5253         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5254         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5255         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5256         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5257         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5258         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5259         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5260         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5261         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5262         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5263         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5264         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5265         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5266         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5267         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5268         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5269         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5270         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5271         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5272         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5273         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5274         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5275         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5276         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5277         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5278         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5279         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5280         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5281         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5282         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5283         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5284         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5285         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5286         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5287         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5288         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5289         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5290         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5291         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5292         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5293         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5294         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5295         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5296         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5297         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5298         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5299         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5300         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5301         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5302         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5303         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5304         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5305         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5306         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5307         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5308         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5309         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5310         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5311         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5312         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5313         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5314         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5315         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5316         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5317         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5318         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5319         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5320         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5321         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5322         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5323         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5324         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5325         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5326         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5327         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5328         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5329         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5330         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5331         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5332         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5333         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5334         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5335         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5336         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5337         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5338         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5339         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5340         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5341         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5342         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5343         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5344         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5345         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5346         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5347         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5348         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5349         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5350         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5351         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5352         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5353         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5354         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5355         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5356         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5357         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5358 };
5359
5360 static u32 tg3TsoFwRodata[] = {
5361         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5362         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5363         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5364         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5365         0x00000000,
5366 };
5367
5368 static u32 tg3TsoFwData[] = {
5369         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5370         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5371         0x00000000,
5372 };
5373
5374 /* 5705 needs a special version of the TSO firmware.  */
5375 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5376 #define TG3_TSO5_FW_RELASE_MINOR        0x2
5377 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5378 #define TG3_TSO5_FW_START_ADDR          0x00010000
5379 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5380 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5381 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5382 #define TG3_TSO5_FW_RODATA_LEN          0x50
5383 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5384 #define TG3_TSO5_FW_DATA_LEN            0x20
5385 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5386 #define TG3_TSO5_FW_SBSS_LEN            0x28
5387 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5388 #define TG3_TSO5_FW_BSS_LEN             0x88
5389
5390 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5391         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5392         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5393         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5394         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5395         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5396         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5397         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5398         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5399         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5400         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5401         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5402         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5403         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5404         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5405         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5406         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5407         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5408         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5409         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5410         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5411         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5412         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5413         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5414         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5415         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5416         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5417         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5418         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5419         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5420         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5421         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5422         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5423         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5424         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5425         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5426         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5427         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5428         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5429         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5430         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5431         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5432         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5433         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5434         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5435         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5436         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5437         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5438         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5439         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5440         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5441         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5442         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5443         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5444         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5445         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5446         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5447         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5448         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5449         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5450         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5451         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5452         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5453         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5454         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5455         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5456         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5457         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5458         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5459         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5460         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5461         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5462         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5463         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5464         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5465         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5466         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5467         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5468         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5469         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5470         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5471         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5472         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5473         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5474         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5475         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5476         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5477         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5478         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5479         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5480         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5481         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5482         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5483         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5484         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5485         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5486         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5487         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5488         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5489         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5490         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5491         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5492         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5493         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5494         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5495         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5496         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5497         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5498         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5499         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5500         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5501         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5502         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5503         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5504         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5505         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5506         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5507         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5508         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5509         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5510         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5511         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5512         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5513         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5514         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5515         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5516         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5517         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5518         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5519         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5520         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5521         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5522         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5523         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5524         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5525         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5526         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5527         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5528         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5529         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5530         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5531         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5532         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5533         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5534         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5535         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5536         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5537         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5538         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5539         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5540         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5541         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5542         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5543         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5544         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5545         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5546         0x00000000, 0x00000000, 0x00000000,
5547 };
5548
5549 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5550         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5551         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5552         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5553         0x00000000, 0x00000000, 0x00000000,
5554 };
5555
5556 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5557         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5558         0x00000000, 0x00000000, 0x00000000,
5559 };
5560
5561 /* tp->lock is held. */
5562 static int tg3_load_tso_firmware(struct tg3 *tp)
5563 {
5564         struct fw_info info;
5565         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5566         int err, i;
5567
5568         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5569                 return 0;
5570
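        /* The 5705 runs its TSO firmware on the RX CPU and borrows the mbuf
         * pool SRAM as scratch space; the other TSO-capable chips load the
         * larger image into the TX CPU's dedicated scratch area.
         */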
5571         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5572                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5573                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5574                 info.text_data = &tg3Tso5FwText[0];
5575                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5576                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5577                 info.rodata_data = &tg3Tso5FwRodata[0];
5578                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5579                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5580                 info.data_data = &tg3Tso5FwData[0];
5581                 cpu_base = RX_CPU_BASE;
5582                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5583                 cpu_scratch_size = (info.text_len +
5584                                     info.rodata_len +
5585                                     info.data_len +
5586                                     TG3_TSO5_FW_SBSS_LEN +
5587                                     TG3_TSO5_FW_BSS_LEN);
5588         } else {
5589                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5590                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5591                 info.text_data = &tg3TsoFwText[0];
5592                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5593                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5594                 info.rodata_data = &tg3TsoFwRodata[0];
5595                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5596                 info.data_len = TG3_TSO_FW_DATA_LEN;
5597                 info.data_data = &tg3TsoFwData[0];
5598                 cpu_base = TX_CPU_BASE;
5599                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5600                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5601         }
5602
5603         err = tg3_load_firmware_cpu(tp, cpu_base,
5604                                     cpu_scratch_base, cpu_scratch_size,
5605                                     &info);
5606         if (err)
5607                 return err;
5608
5609         /* Now start up the CPU. */
5610         tw32(cpu_base + CPU_STATE, 0xffffffff);
5611         tw32_f(cpu_base + CPU_PC,    info.text_base);
5612
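        /* Give the CPU a few chances to latch the new program counter:
         * re-halt it, rewrite the PC and wait 1ms, up to five times.
         */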
5613         for (i = 0; i < 5; i++) {
5614                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5615                         break;
5616                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5617                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5618                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5619                 udelay(1000);
5620         }
5621         if (i >= 5) {
5622                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails to set "
5623                        "CPU PC for %s: current %08x, expected %08x\n",
5624                        tp->dev->name, tr32(cpu_base + CPU_PC),
5625                        info.text_base);
5626                 return -ENODEV;
5627         }
5628         tw32(cpu_base + CPU_STATE, 0xffffffff);
5629         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5630         return 0;
5631 }
5632
5633 #endif /* TG3_TSO_SUPPORT != 0 */
5634
5635 /* tp->lock is held. */
5636 static void __tg3_set_mac_addr(struct tg3 *tp)
5637 {
5638         u32 addr_high, addr_low;
5639         int i;
5640
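        /* The high register holds MAC bytes 0-1 and the low register bytes
         * 2-5, e.g. 00:10:18:01:02:03 becomes addr_high 0x00000010 and
         * addr_low 0x18010203.  The same address is copied into all four
         * MAC_ADDR slots.
         */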
5641         addr_high = ((tp->dev->dev_addr[0] << 8) |
5642                      tp->dev->dev_addr[1]);
5643         addr_low = ((tp->dev->dev_addr[2] << 24) |
5644                     (tp->dev->dev_addr[3] << 16) |
5645                     (tp->dev->dev_addr[4] <<  8) |
5646                     (tp->dev->dev_addr[5] <<  0));
5647         for (i = 0; i < 4; i++) {
5648                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5649                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5650         }
5651
5652         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5653             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5654                 for (i = 0; i < 12; i++) {
5655                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5656                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5657                 }
5658         }
5659
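        /* The transmit backoff seed is just the byte-wise sum of the MAC
         * address, masked to the width of the seed field.
         */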
5660         addr_high = (tp->dev->dev_addr[0] +
5661                      tp->dev->dev_addr[1] +
5662                      tp->dev->dev_addr[2] +
5663                      tp->dev->dev_addr[3] +
5664                      tp->dev->dev_addr[4] +
5665                      tp->dev->dev_addr[5]) &
5666                 TX_BACKOFF_SEED_MASK;
5667         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5668 }
5669
5670 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5671 {
5672         struct tg3 *tp = netdev_priv(dev);
5673         struct sockaddr *addr = p;
5674
5675         if (!is_valid_ether_addr(addr->sa_data))
5676                 return -EINVAL;
5677
5678         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5679
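        /* Nothing to program while the interface is down; tg3_reset_hw()
         * pushes the new address into the MAC the next time the device is
         * brought up.
         */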
5680         if (!netif_running(dev))
5681                 return 0;
5682
5683         spin_lock_bh(&tp->lock);
5684         __tg3_set_mac_addr(tp);
5685         spin_unlock_bh(&tp->lock);
5686
5687         return 0;
5688 }
5689
5690 /* tp->lock is held. */
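/* Write one TG3_BDINFO block into NIC memory: the 64-bit host DMA address
 * split into high/low words, the maxlen/flags word and, on pre-5705 chips,
 * the ring's location in NIC SRAM.
 */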
5691 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5692                            dma_addr_t mapping, u32 maxlen_flags,
5693                            u32 nic_addr)
5694 {
5695         tg3_write_mem(tp,
5696                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5697                       ((u64) mapping >> 32));
5698         tg3_write_mem(tp,
5699                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5700                       ((u64) mapping & 0xffffffff));
5701         tg3_write_mem(tp,
5702                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5703                        maxlen_flags);
5704
5705         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5706                 tg3_write_mem(tp,
5707                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5708                               nic_addr);
5709 }
5710
5711 static void __tg3_set_rx_mode(struct net_device *);
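/* Program the host coalescing engine from an ethtool_coalesce structure.
 * The per-interrupt tick registers and the statistics block tick are only
 * written on pre-5705 chips, and the statistics tick is forced to zero
 * while the link is down.
 */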
5712 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5713 {
5714         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5715         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5716         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5717         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5718         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5719                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5720                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5721         }
5722         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5723         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5724         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5725                 u32 val = ec->stats_block_coalesce_usecs;
5726
5727                 if (!netif_carrier_ok(tp->dev))
5728                         val = 0;
5729
5730                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5731         }
5732 }
5733
5734 /* tp->lock is held. */
5735 static int tg3_reset_hw(struct tg3 *tp)
5736 {
5737         u32 val, rdmac_mode;
5738         int i, err, limit;
5739
5740         tg3_disable_ints(tp);
5741
5742         tg3_stop_fw(tp);
5743
5744         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5745
5746         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5747                 tg3_abort_hw(tp, 1);
5748         }
5749
5750         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
5751                 tg3_phy_reset(tp);
5752
5753         err = tg3_chip_reset(tp);
5754         if (err)
5755                 return err;
5756
5757         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5758
5759         /* This works around an issue with Athlon chipsets on
5760          * B3 tigon3 silicon.  This bit has no effect on any
5761          * other revision.  But do not set this on PCI Express
5762          * chips.
5763          */
5764         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5765                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5766         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5767
5768         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5769             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5770                 val = tr32(TG3PCI_PCISTATE);
5771                 val |= PCISTATE_RETRY_SAME_DMA;
5772                 tw32(TG3PCI_PCISTATE, val);
5773         }
5774
5775         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5776                 /* Enable some hw fixes.  */
5777                 val = tr32(TG3PCI_MSI_DATA);
5778                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5779                 tw32(TG3PCI_MSI_DATA, val);
5780         }
5781
5782         /* Descriptor ring init may make accesses to the
5783          * NIC SRAM area to setup the TX descriptors, so we
5784          * can only do this after the hardware has been
5785          * successfully reset.
5786          */
5787         tg3_init_rings(tp);
5788
5789         /* This value is determined during the probe time DMA
5790          * engine test, tg3_test_dma.
5791          */
5792         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5793
5794         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5795                           GRC_MODE_4X_NIC_SEND_RINGS |
5796                           GRC_MODE_NO_TX_PHDR_CSUM |
5797                           GRC_MODE_NO_RX_PHDR_CSUM);
5798         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5799         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5800                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5801         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5802                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5803
5804         tw32(GRC_MODE,
5805              tp->grc_mode |
5806              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5807
5808         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
5809         val = tr32(GRC_MISC_CFG);
5810         val &= ~0xff;
5811         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5812         tw32(GRC_MISC_CFG, val);
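        /* A prescaler value of 65 presumably divides the fixed 66 MHz core
         * clock by 66, giving the timer a 1 usec tick.
         */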
5813
5814         /* Initialize MBUF/DESC pool. */
5815         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5816                 /* Do nothing.  */
5817         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5818                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5819                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5820                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5821                 else
5822                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5823                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5824                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5825         }
5826 #if TG3_TSO_SUPPORT != 0
5827         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5828                 int fw_len;
5829
5830                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5831                           TG3_TSO5_FW_RODATA_LEN +
5832                           TG3_TSO5_FW_DATA_LEN +
5833                           TG3_TSO5_FW_SBSS_LEN +
5834                           TG3_TSO5_FW_BSS_LEN);
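                /* Round the on-chip TSO firmware footprint up to a 128-byte
                 * boundary, then carve the mbuf pool out of the SRAM above
                 * it (less a further 0xa00 bytes the firmware appears to
                 * reserve for itself).
                 */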
5835                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5836                 tw32(BUFMGR_MB_POOL_ADDR,
5837                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5838                 tw32(BUFMGR_MB_POOL_SIZE,
5839                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5840         }
5841 #endif
5842
5843         if (tp->dev->mtu <= ETH_DATA_LEN) {
5844                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5845                      tp->bufmgr_config.mbuf_read_dma_low_water);
5846                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5847                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5848                 tw32(BUFMGR_MB_HIGH_WATER,
5849                      tp->bufmgr_config.mbuf_high_water);
5850         } else {
5851                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5852                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5853                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5854                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5855                 tw32(BUFMGR_MB_HIGH_WATER,
5856                      tp->bufmgr_config.mbuf_high_water_jumbo);
5857         }
5858         tw32(BUFMGR_DMA_LOW_WATER,
5859              tp->bufmgr_config.dma_low_water);
5860         tw32(BUFMGR_DMA_HIGH_WATER,
5861              tp->bufmgr_config.dma_high_water);
5862
5863         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5864         for (i = 0; i < 2000; i++) {
5865                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5866                         break;
5867                 udelay(10);
5868         }
5869         if (i >= 2000) {
5870                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5871                        tp->dev->name);
5872                 return -ENODEV;
5873         }
5874
5875         /* Setup replenish threshold. */
5876         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5877
5878         /* Initialize TG3_BDINFO's at:
5879          *  RCVDBDI_STD_BD:     standard eth size rx ring
5880          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5881          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5882          *
5883          * like so:
5884          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5885          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5886          *                              ring attribute flags
5887          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5888          *
5889          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5890          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5891          *
5892          * The size of each ring is fixed in the firmware, but the location is
5893          * configurable.
5894          */
5895         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5896              ((u64) tp->rx_std_mapping >> 32));
5897         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5898              ((u64) tp->rx_std_mapping & 0xffffffff));
5899         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5900              NIC_SRAM_RX_BUFFER_DESC);
5901
5902         /* Don't even try to program the JUMBO/MINI buffer descriptor
5903          * configs on 5705.
5904          */
5905         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5906                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5907                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5908         } else {
5909                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5910                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5911
5912                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5913                      BDINFO_FLAGS_DISABLED);
5914
5915                 /* Setup replenish threshold. */
5916                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5917
5918                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5919                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5920                              ((u64) tp->rx_jumbo_mapping >> 32));
5921                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5922                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5923                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5924                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5925                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5926                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5927                 } else {
5928                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5929                              BDINFO_FLAGS_DISABLED);
5930                 }
5931
5932         }
5933
5934         /* There is only one send ring on 5705/5750, no need to explicitly
5935          * disable the others.
5936          */
5937         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5938                 /* Clear out send RCB ring in SRAM. */
5939                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5940                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5941                                       BDINFO_FLAGS_DISABLED);
5942         }
5943
5944         tp->tx_prod = 0;
5945         tp->tx_cons = 0;
5946         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5947         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5948
5949         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5950                        tp->tx_desc_mapping,
5951                        (TG3_TX_RING_SIZE <<
5952                         BDINFO_FLAGS_MAXLEN_SHIFT),
5953                        NIC_SRAM_TX_BUFFER_DESC);
5954
5955         /* There is only one receive return ring on 5705/5750, no need
5956          * to explicitly disable the others.
5957          */
5958         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5959                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5960                      i += TG3_BDINFO_SIZE) {
5961                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5962                                       BDINFO_FLAGS_DISABLED);
5963                 }
5964         }
5965
5966         tp->rx_rcb_ptr = 0;
5967         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5968
5969         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5970                        tp->rx_rcb_mapping,
5971                        (TG3_RX_RCB_RING_SIZE(tp) <<
5972                         BDINFO_FLAGS_MAXLEN_SHIFT),
5973                        0);
5974
5975         tp->rx_std_ptr = tp->rx_pending;
5976         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5977                      tp->rx_std_ptr);
5978
5979         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
5980                                                 tp->rx_jumbo_pending : 0;
5981         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5982                      tp->rx_jumbo_ptr);
5983
5984         /* Initialize MAC address and backoff seed. */
5985         __tg3_set_mac_addr(tp);
5986
5987         /* MTU + ethernet header + FCS + optional VLAN tag */
5988         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
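        /* e.g. a 1500-byte MTU programs 1500 + 14 (header) + 4 (FCS) +
         * 4 (VLAN) = 1522 bytes.
         */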
5989
5990         /* The slot time is changed by tg3_setup_phy if we
5991          * run at gigabit with half duplex.
5992          */
5993         tw32(MAC_TX_LENGTHS,
5994              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5995              (6 << TX_LENGTHS_IPG_SHIFT) |
5996              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5997
5998         /* Receive rules. */
5999         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6000         tw32(RCVLPC_CONFIG, 0x0181);
6001
6002         /* Calculate RDMAC_MODE setting early, we need it to determine
6003          * the RCVLPC_STATE_ENABLE mask.
6004          */
6005         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6006                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6007                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6008                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6009                       RDMAC_MODE_LNGREAD_ENAB);
6010         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6011                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6012
6013         /* If statement applies to 5705 and 5750 PCI devices only */
6014         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6015              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6016             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6017                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6018                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6019                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6020                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6021                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6022                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6023                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6024                 }
6025         }
6026
6027         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6028                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6029
6030 #if TG3_TSO_SUPPORT != 0
6031         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6032                 rdmac_mode |= (1 << 27);
6033 #endif
6034
6035         /* Receive/send statistics. */
6036         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6037             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6038                 val = tr32(RCVLPC_STATS_ENABLE);
6039                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6040                 tw32(RCVLPC_STATS_ENABLE, val);
6041         } else {
6042                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6043         }
6044         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6045         tw32(SNDDATAI_STATSENAB, 0xffffff);
6046         tw32(SNDDATAI_STATSCTRL,
6047              (SNDDATAI_SCTRL_ENABLE |
6048               SNDDATAI_SCTRL_FASTUPD));
6049
6050         /* Setup host coalescing engine. */
6051         tw32(HOSTCC_MODE, 0);
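        /* Writing 0 disables the engine; poll for up to 2000 * 10usec
         * (~20ms) for the enable bit to clear before reprogramming the
         * coalescing parameters.
         */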
6052         for (i = 0; i < 2000; i++) {
6053                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6054                         break;
6055                 udelay(10);
6056         }
6057
6058         __tg3_set_coalesce(tp, &tp->coal);
6059
6060         /* set status block DMA address */
6061         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6062              ((u64) tp->status_mapping >> 32));
6063         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6064              ((u64) tp->status_mapping & 0xffffffff));
6065
6066         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6067                 /* Status/statistics block address.  See tg3_timer,
6068                  * the tg3_periodic_fetch_stats call there, and
6069                  * tg3_get_stats to see how this works for 5705/5750 chips.
6070                  */
6071                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6072                      ((u64) tp->stats_mapping >> 32));
6073                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6074                      ((u64) tp->stats_mapping & 0xffffffff));
6075                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6076                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6077         }
6078
6079         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6080
6081         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6082         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6083         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6084                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6085
6086         /* Clear statistics/status block in chip, and status block in ram. */
6087         for (i = NIC_SRAM_STATS_BLK;
6088              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6089              i += sizeof(u32)) {
6090                 tg3_write_mem(tp, i, 0);
6091                 udelay(40);
6092         }
6093         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6094
6095         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6096                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6097                 /* reset to prevent losing 1st rx packet intermittently */
6098                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6099                 udelay(10);
6100         }
6101
6102         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6103                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6104         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6105         udelay(40);
6106
6107         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6108          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6109          * register to preserve the GPIO settings for LOMs. The GPIOs,
6110          * whether used as inputs or outputs, are set by boot code after
6111          * reset.
6112          */
6113         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6114                 u32 gpio_mask;
6115
6116                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6117                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6118
6119                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6120                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6121                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6122
6123                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6124
6125                 /* GPIO1 must be driven high for eeprom write protect */
6126                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6127                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6128         }
6129         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6130         udelay(100);
6131
6132         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6133         tp->last_tag = 0;
6134
6135         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6136                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6137                 udelay(40);
6138         }
6139
6140         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6141                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6142                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6143                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6144                WDMAC_MODE_LNGREAD_ENAB);
6145
6146         /* If statement applies to 5705 and 5750 PCI devices only */
6147         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6148              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6149             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6150                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6151                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6152                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6153                         /* nothing */
6154                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6155                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6156                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6157                         val |= WDMAC_MODE_RX_ACCEL;
6158                 }
6159         }
6160
6161         /* Enable host coalescing bug fix */
6162         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
6163                 val |= (1 << 29);
6164
6165         tw32_f(WDMAC_MODE, val);
6166         udelay(40);
6167
6168         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6169                 val = tr32(TG3PCI_X_CAPS);
6170                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6171                         val &= ~PCIX_CAPS_BURST_MASK;
6172                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6173                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6174                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6175                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6176                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6177                                 val |= (tp->split_mode_max_reqs <<
6178                                         PCIX_CAPS_SPLIT_SHIFT);
6179                 }
6180                 tw32(TG3PCI_X_CAPS, val);
6181         }
6182
6183         tw32_f(RDMAC_MODE, rdmac_mode);
6184         udelay(40);
6185
6186         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6187         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6188                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6189         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6190         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6191         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6192         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6193         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6194 #if TG3_TSO_SUPPORT != 0
6195         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6196                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6197 #endif
6198         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6199         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6200
6201         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6202                 err = tg3_load_5701_a0_firmware_fix(tp);
6203                 if (err)
6204                         return err;
6205         }
6206
6207 #if TG3_TSO_SUPPORT != 0
6208         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6209                 err = tg3_load_tso_firmware(tp);
6210                 if (err)
6211                         return err;
6212         }
6213 #endif
6214
6215         tp->tx_mode = TX_MODE_ENABLE;
6216         tw32_f(MAC_TX_MODE, tp->tx_mode);
6217         udelay(100);
6218
6219         tp->rx_mode = RX_MODE_ENABLE;
6220         tw32_f(MAC_RX_MODE, tp->rx_mode);
6221         udelay(10);
6222
6223         if (tp->link_config.phy_is_low_power) {
6224                 tp->link_config.phy_is_low_power = 0;
6225                 tp->link_config.speed = tp->link_config.orig_speed;
6226                 tp->link_config.duplex = tp->link_config.orig_duplex;
6227                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6228         }
6229
6230         tp->mi_mode = MAC_MI_MODE_BASE;
6231         tw32_f(MAC_MI_MODE, tp->mi_mode);
6232         udelay(80);
6233
6234         tw32(MAC_LED_CTRL, tp->led_ctrl);
6235
6236         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6237         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6238                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6239                 udelay(10);
6240         }
6241         tw32_f(MAC_RX_MODE, tp->rx_mode);
6242         udelay(10);
6243
6244         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6245                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6246                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6247                         /* Set drive transmission level to 1.2V  */
6248                         /* only if the signal pre-emphasis bit is not set  */
6249                         val = tr32(MAC_SERDES_CFG);
6250                         val &= 0xfffff000;
6251                         val |= 0x880;
6252                         tw32(MAC_SERDES_CFG, val);
6253                 }
6254                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6255                         tw32(MAC_SERDES_CFG, 0x616000);
6256         }
6257
6258         /* Prevent chip from dropping frames when flow control
6259          * is enabled.
6260          */
6261         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6262
6263         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6264             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6265                 /* Use hardware link auto-negotiation */
6266                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6267         }
6268
6269         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6270             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6271                 u32 tmp;
6272
6273                 tmp = tr32(SERDES_RX_CTRL);
6274                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6275                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6276                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6277                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6278         }
6279
6280         err = tg3_setup_phy(tp, 1);
6281         if (err)
6282                 return err;
6283
6284         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6285                 u32 tmp;
6286
6287                 /* Clear CRC stats. */
6288                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6289                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6290                         tg3_readphy(tp, 0x14, &tmp);
6291                 }
6292         }
6293
6294         __tg3_set_rx_mode(tp->dev);
6295
6296         /* Initialize receive rules. */
6297         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6298         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6299         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6300         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6301
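             /* The switch below deliberately falls through: receive rule
              * slots (limit - 1) down to 4 are cleared (rules 0 and 1 were
              * just programmed, 2 and 3 are left alone).  Older parts expose
              * 16 rule/value register pairs while 5705-class parts expose
              * only 8, and when ASF firmware is enabled the top four slots
              * are skipped, presumably because the firmware owns them; that
              * last point is an inference, not something stated here.
              */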
6302         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6303             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6304                 limit = 8;
6305         else
6306                 limit = 16;
6307         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6308                 limit -= 4;
6309         switch (limit) {
6310         case 16:
6311                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6312         case 15:
6313                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6314         case 14:
6315                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6316         case 13:
6317                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6318         case 12:
6319                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6320         case 11:
6321                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6322         case 10:
6323                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6324         case 9:
6325                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6326         case 8:
6327                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6328         case 7:
6329                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6330         case 6:
6331                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6332         case 5:
6333                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6334         case 4:
6335                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6336         case 3:
6337                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6338         case 2:
6339         case 1:
6340
6341         default:
6342                 break;
6343         }
6344
6345         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6346
6347         return 0;
6348 }
6349
6350 /* Called at device open time to get the chip ready for
6351  * packet processing.  Invoked with tp->lock held.
6352  */
6353 static int tg3_init_hw(struct tg3 *tp)
6354 {
6355         int err;
6356
6357         /* Force the chip into D0. */
6358         err = tg3_set_power_state(tp, PCI_D0);
6359         if (err)
6360                 goto out;
6361
6362         tg3_switch_clocks(tp);
6363
6364         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6365
6366         err = tg3_reset_hw(tp);
6367
6368 out:
6369         return err;
6370 }
6371
6372 #define TG3_STAT_ADD32(PSTAT, REG) \
6373 do {    u32 __val = tr32(REG); \
6374         (PSTAT)->low += __val; \
6375         if ((PSTAT)->low < __val) \
6376                 (PSTAT)->high += 1; \
6377 } while (0)
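/* TG3_STAT_ADD32 accumulates a 32-bit hardware counter into a 64-bit
 * high/low software counter: the value just read is added to ->low, and if
 * ->low wrapped around (it is now smaller than the value that was added),
 * a carry is propagated into ->high.  For example, low = 0xffffff00 plus
 * 0x200 yields 0x100, which is < 0x200, so high is incremented.
 */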
6378
6379 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6380 {
6381         struct tg3_hw_stats *sp = tp->hw_stats;
6382
6383         if (!netif_carrier_ok(tp->dev))
6384                 return;
6385
6386         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6387         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6388         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6389         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6390         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6391         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6392         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6393         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6394         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6395         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6396         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6397         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6398         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6399
6400         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6401         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6402         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6403         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6404         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6405         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6406         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6407         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6408         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6409         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6410         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6411         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6412         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6413         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6414 }
6415
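/* Driver heartbeat timer.  It re-arms itself every tp->timer_offset jiffies;
 * timer_counter counts those ticks down to the once-per-second work (stats
 * fetch and link polling) and asf_counter counts them down to the ASF
 * keep-alive, which goes out every two seconds.  The counters and the offset
 * are set up in tg3_open() below.
 */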
6416 static void tg3_timer(unsigned long __opaque)
6417 {
6418         struct tg3 *tp = (struct tg3 *) __opaque;
6419
6420         spin_lock(&tp->lock);
6421
6422         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6423                 /* All of this garbage is because, when using non-tagged
6424                  * IRQ status, the mailbox/status_block protocol the chip
6425                  * uses with the CPU is race prone.
6426                  */
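                     /* Workaround: if the status block already shows pending
                      * work, assert GRC_LCLCTRL_SETINT to (re)trigger the
                      * interrupt; otherwise kick host coalescing
                      * (HOSTCC_MODE_NOW) to DMA a fresh status block.
                      */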
6427                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6428                         tw32(GRC_LOCAL_CTRL,
6429                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6430                 } else {
6431                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6432                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6433                 }
6434
6435                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6436                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6437                         spin_unlock(&tp->lock);
6438                         schedule_work(&tp->reset_task);
6439                         return;
6440                 }
6441         }
6442
6443         /* This part only runs once per second. */
6444         if (!--tp->timer_counter) {
6445                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6446                         tg3_periodic_fetch_stats(tp);
6447
6448                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6449                         u32 mac_stat;
6450                         int phy_event;
6451
6452                         mac_stat = tr32(MAC_STATUS);
6453
6454                         phy_event = 0;
6455                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6456                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6457                                         phy_event = 1;
6458                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6459                                 phy_event = 1;
6460
6461                         if (phy_event)
6462                                 tg3_setup_phy(tp, 0);
6463                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6464                         u32 mac_stat = tr32(MAC_STATUS);
6465                         int need_setup = 0;
6466
6467                         if (netif_carrier_ok(tp->dev) &&
6468                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6469                                 need_setup = 1;
6470                         }
6471                         if (!netif_carrier_ok(tp->dev) &&
6472                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6473                                          MAC_STATUS_SIGNAL_DET))) {
6474                                 need_setup = 1;
6475                         }
6476                         if (need_setup) {
6477                                 tw32_f(MAC_MODE,
6478                                      (tp->mac_mode &
6479                                       ~MAC_MODE_PORT_MODE_MASK));
6480                                 udelay(40);
6481                                 tw32_f(MAC_MODE, tp->mac_mode);
6482                                 udelay(40);
6483                                 tg3_setup_phy(tp, 0);
6484                         }
6485                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6486                         tg3_serdes_parallel_detect(tp);
6487
6488                 tp->timer_counter = tp->timer_multiplier;
6489         }
6490
6491         /* Heartbeat is only sent once every 2 seconds.  */
6492         if (!--tp->asf_counter) {
6493                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6494                         u32 val;
6495
6496                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
6497                                            FWCMD_NICDRV_ALIVE2);
6498                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6499                         /* 5 seconds timeout */
6500                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6501                         val = tr32(GRC_RX_CPU_EVENT);
6502                         val |= (1 << 14);
6503                         tw32(GRC_RX_CPU_EVENT, val);
6504                 }
6505                 tp->asf_counter = tp->asf_multiplier;
6506         }
6507
6508         spin_unlock(&tp->lock);
6509
6510         tp->timer.expires = jiffies + tp->timer_offset;
6511         add_timer(&tp->timer);
6512 }
6513
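/* Interrupt self-test used by the MSI sanity check below: a minimal test
 * ISR is installed, a status block update is forced via HOSTCC_MODE_NOW,
 * and the interrupt mailbox is polled for up to ~50ms; a non-zero mailbox
 * value is taken as proof that the interrupt was delivered.  The real
 * handler (MSI, tagged-status or legacy) is re-installed before returning.
 */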
6514 static int tg3_test_interrupt(struct tg3 *tp)
6515 {
6516         struct net_device *dev = tp->dev;
6517         int err, i;
6518         u32 int_mbox = 0;
6519
6520         if (!netif_running(dev))
6521                 return -ENODEV;
6522
6523         tg3_disable_ints(tp);
6524
6525         free_irq(tp->pdev->irq, dev);
6526
6527         err = request_irq(tp->pdev->irq, tg3_test_isr,
6528                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6529         if (err)
6530                 return err;
6531
6532         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6533         tg3_enable_ints(tp);
6534
6535         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6536                HOSTCC_MODE_NOW);
6537
6538         for (i = 0; i < 5; i++) {
6539                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6540                                         TG3_64BIT_REG_LOW);
6541                 if (int_mbox != 0)
6542                         break;
6543                 msleep(10);
6544         }
6545
6546         tg3_disable_ints(tp);
6547
6548         free_irq(tp->pdev->irq, dev);
6549         
6550         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6551                 err = request_irq(tp->pdev->irq, tg3_msi,
6552                                   SA_SAMPLE_RANDOM, dev->name, dev);
6553         else {
6554                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6555                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6556                         fn = tg3_interrupt_tagged;
6557                 err = request_irq(tp->pdev->irq, fn,
6558                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6559         }
6560
6561         if (err)
6562                 return err;
6563
6564         if (int_mbox != 0)
6565                 return 0;
6566
6567         return -EIO;
6568 }
6569
6570 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
6571  * INTx mode is successfully restored.
6572  */
6573 static int tg3_test_msi(struct tg3 *tp)
6574 {
6575         struct net_device *dev = tp->dev;
6576         int err;
6577         u16 pci_cmd;
6578
6579         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6580                 return 0;
6581
6582         /* Turn off SERR reporting in case MSI terminates with Master
6583          * Abort.
6584          */
6585         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6586         pci_write_config_word(tp->pdev, PCI_COMMAND,
6587                               pci_cmd & ~PCI_COMMAND_SERR);
6588
6589         err = tg3_test_interrupt(tp);
6590
6591         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6592
6593         if (!err)
6594                 return 0;
6595
6596         /* other failures */
6597         if (err != -EIO)
6598                 return err;
6599
6600         /* MSI test failed, go back to INTx mode */
6601         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6602                "switching to INTx mode. Please report this failure to "
6603                "the PCI maintainer and include system chipset information.\n",
6604                        tp->dev->name);
6605
6606         free_irq(tp->pdev->irq, dev);
6607         pci_disable_msi(tp->pdev);
6608
6609         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6610
6611         {
6612                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6613                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6614                         fn = tg3_interrupt_tagged;
6615
6616                 err = request_irq(tp->pdev->irq, fn,
6617                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6618         }
6619         if (err)
6620                 return err;
6621
6622         /* Need to reset the chip because the MSI cycle may have terminated
6623          * with Master Abort.
6624          */
6625         tg3_full_lock(tp, 1);
6626
6627         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6628         err = tg3_init_hw(tp);
6629
6630         tg3_full_unlock(tp);
6631
6632         if (err)
6633                 free_irq(tp->pdev->irq, dev);
6634
6635         return err;
6636 }
6637
6638 static int tg3_open(struct net_device *dev)
6639 {
6640         struct tg3 *tp = netdev_priv(dev);
6641         int err;
6642
6643         tg3_full_lock(tp, 0);
6644
6645         err = tg3_set_power_state(tp, PCI_D0);
6646         if (err)
6647                 return err;
6648
6649         tg3_disable_ints(tp);
6650         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6651
6652         tg3_full_unlock(tp);
6653
6654         /* The placement of this call is tied
6655          * to the setup and use of Host TX descriptors.
6656          */
6657         err = tg3_alloc_consistent(tp);
6658         if (err)
6659                 return err;
6660
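             /* MSI is only attempted on 5750-and-later silicon, and even
              * then the 5750 A and B steppings are skipped, as is a 5714
              * whose pdev_peer is itself (no companion function was found).
              * Presumably these exclusions reflect MSI errata; the code only
              * records the conditions, not the reasons.
              */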
6661         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6662             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6663             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6664             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6665               (tp->pdev_peer == tp->pdev))) {
6666                 /* All MSI-supporting chips should support tagged
6667                  * status.  Assert that this is the case.
6668                  */
6669                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6670                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6671                                "Not using MSI.\n", tp->dev->name);
6672                 } else if (pci_enable_msi(tp->pdev) == 0) {
6673                         u32 msi_mode;
6674
6675                         msi_mode = tr32(MSGINT_MODE);
6676                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6677                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6678                 }
6679         }
6680         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6681                 err = request_irq(tp->pdev->irq, tg3_msi,
6682                                   SA_SAMPLE_RANDOM, dev->name, dev);
6683         else {
6684                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6685                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6686                         fn = tg3_interrupt_tagged;
6687
6688                 err = request_irq(tp->pdev->irq, fn,
6689                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6690         }
6691
6692         if (err) {
6693                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6694                         pci_disable_msi(tp->pdev);
6695                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6696                 }
6697                 tg3_free_consistent(tp);
6698                 return err;
6699         }
6700
6701         tg3_full_lock(tp, 0);
6702
6703         err = tg3_init_hw(tp);
6704         if (err) {
6705                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6706                 tg3_free_rings(tp);
6707         } else {
6708                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6709                         tp->timer_offset = HZ;
6710                 else
6711                         tp->timer_offset = HZ / 10;
6712
6713                 BUG_ON(tp->timer_offset > HZ);
6714                 tp->timer_counter = tp->timer_multiplier =
6715                         (HZ / tp->timer_offset);
6716                 tp->asf_counter = tp->asf_multiplier =
6717                         ((HZ / tp->timer_offset) * 2);
6718
6719                 init_timer(&tp->timer);
6720                 tp->timer.expires = jiffies + tp->timer_offset;
6721                 tp->timer.data = (unsigned long) tp;
6722                 tp->timer.function = tg3_timer;
6723         }
6724
6725         tg3_full_unlock(tp);
6726
6727         if (err) {
6728                 free_irq(tp->pdev->irq, dev);
6729                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6730                         pci_disable_msi(tp->pdev);
6731                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6732                 }
6733                 tg3_free_consistent(tp);
6734                 return err;
6735         }
6736
6737         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6738                 err = tg3_test_msi(tp);
6739
6740                 if (err) {
6741                         tg3_full_lock(tp, 0);
6742
6743                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6744                                 pci_disable_msi(tp->pdev);
6745                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6746                         }
6747                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6748                         tg3_free_rings(tp);
6749                         tg3_free_consistent(tp);
6750
6751                         tg3_full_unlock(tp);
6752
6753                         return err;
6754                 }
6755         }
6756
6757         tg3_full_lock(tp, 0);
6758
6759         add_timer(&tp->timer);
6760         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6761         tg3_enable_ints(tp);
6762
6763         tg3_full_unlock(tp);
6764
6765         netif_start_queue(dev);
6766
6767         return 0;
6768 }
6769
6770 #if 0
6771 /*static*/ void tg3_dump_state(struct tg3 *tp)
6772 {
6773         u32 val32, val32_2, val32_3, val32_4, val32_5;
6774         u16 val16;
6775         int i;
6776
6777         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6778         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6779         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6780                val16, val32);
6781
6782         /* MAC block */
6783         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6784                tr32(MAC_MODE), tr32(MAC_STATUS));
6785         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6786                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6787         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6788                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6789         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6790                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6791
6792         /* Send data initiator control block */
6793         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6794                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6795         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6796                tr32(SNDDATAI_STATSCTRL));
6797
6798         /* Send data completion control block */
6799         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6800
6801         /* Send BD ring selector block */
6802         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6803                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6804
6805         /* Send BD initiator control block */
6806         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6807                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6808
6809         /* Send BD completion control block */
6810         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6811
6812         /* Receive list placement control block */
6813         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6814                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6815         printk("       RCVLPC_STATSCTRL[%08x]\n",
6816                tr32(RCVLPC_STATSCTRL));
6817
6818         /* Receive data and receive BD initiator control block */
6819         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6820                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6821
6822         /* Receive data completion control block */
6823         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6824                tr32(RCVDCC_MODE));
6825
6826         /* Receive BD initiator control block */
6827         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6828                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6829
6830         /* Receive BD completion control block */
6831         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6832                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6833
6834         /* Receive list selector control block */
6835         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6836                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6837
6838         /* Mbuf cluster free block */
6839         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6840                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6841
6842         /* Host coalescing control block */
6843         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6844                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6845         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6846                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6847                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6848         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6849                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6850                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6851         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6852                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6853         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6854                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6855
6856         /* Memory arbiter control block */
6857         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6858                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6859
6860         /* Buffer manager control block */
6861         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6862                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6863         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6864                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6865         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6866                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6867                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6868                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6869
6870         /* Read DMA control block */
6871         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6872                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6873
6874         /* Write DMA control block */
6875         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6876                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6877
6878         /* DMA completion block */
6879         printk("DEBUG: DMAC_MODE[%08x]\n",
6880                tr32(DMAC_MODE));
6881
6882         /* GRC block */
6883         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6884                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6885         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6886                tr32(GRC_LOCAL_CTRL));
6887
6888         /* TG3_BDINFOs */
6889         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6890                tr32(RCVDBDI_JUMBO_BD + 0x0),
6891                tr32(RCVDBDI_JUMBO_BD + 0x4),
6892                tr32(RCVDBDI_JUMBO_BD + 0x8),
6893                tr32(RCVDBDI_JUMBO_BD + 0xc));
6894         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6895                tr32(RCVDBDI_STD_BD + 0x0),
6896                tr32(RCVDBDI_STD_BD + 0x4),
6897                tr32(RCVDBDI_STD_BD + 0x8),
6898                tr32(RCVDBDI_STD_BD + 0xc));
6899         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6900                tr32(RCVDBDI_MINI_BD + 0x0),
6901                tr32(RCVDBDI_MINI_BD + 0x4),
6902                tr32(RCVDBDI_MINI_BD + 0x8),
6903                tr32(RCVDBDI_MINI_BD + 0xc));
6904
6905         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6906         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6907         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6908         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6909         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6910                val32, val32_2, val32_3, val32_4);
6911
6912         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6913         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6914         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6915         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6916         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6917                val32, val32_2, val32_3, val32_4);
6918
6919         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6920         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6921         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6922         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6923         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6924         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6925                val32, val32_2, val32_3, val32_4, val32_5);
6926
6927         /* SW status block */
6928         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6929                tp->hw_status->status,
6930                tp->hw_status->status_tag,
6931                tp->hw_status->rx_jumbo_consumer,
6932                tp->hw_status->rx_consumer,
6933                tp->hw_status->rx_mini_consumer,
6934                tp->hw_status->idx[0].rx_producer,
6935                tp->hw_status->idx[0].tx_consumer);
6936
6937         /* SW statistics block */
6938         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6939                ((u32 *)tp->hw_stats)[0],
6940                ((u32 *)tp->hw_stats)[1],
6941                ((u32 *)tp->hw_stats)[2],
6942                ((u32 *)tp->hw_stats)[3]);
6943
6944         /* Mailboxes */
6945         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6946                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6947                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6948                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6949                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6950
6951         /* NIC side send descriptors. */
6952         for (i = 0; i < 6; i++) {
6953                 unsigned long txd;
6954
6955                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6956                         + (i * sizeof(struct tg3_tx_buffer_desc));
6957                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6958                        i,
6959                        readl(txd + 0x0), readl(txd + 0x4),
6960                        readl(txd + 0x8), readl(txd + 0xc));
6961         }
6962
6963         /* NIC side RX descriptors. */
6964         for (i = 0; i < 6; i++) {
6965                 unsigned long rxd;
6966
6967                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6968                         + (i * sizeof(struct tg3_rx_buffer_desc));
6969                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6970                        i,
6971                        readl(rxd + 0x0), readl(rxd + 0x4),
6972                        readl(rxd + 0x8), readl(rxd + 0xc));
6973                 rxd += (4 * sizeof(u32));
6974                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6975                        i,
6976                        readl(rxd + 0x0), readl(rxd + 0x4),
6977                        readl(rxd + 0x8), readl(rxd + 0xc));
6978         }
6979
6980         for (i = 0; i < 6; i++) {
6981                 unsigned long rxd;
6982
6983                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6984                         + (i * sizeof(struct tg3_rx_buffer_desc));
6985                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6986                        i,
6987                        readl(rxd + 0x0), readl(rxd + 0x4),
6988                        readl(rxd + 0x8), readl(rxd + 0xc));
6989                 rxd += (4 * sizeof(u32));
6990                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6991                        i,
6992                        readl(rxd + 0x0), readl(rxd + 0x4),
6993                        readl(rxd + 0x8), readl(rxd + 0xc));
6994         }
6995 }
6996 #endif
6997
6998 static struct net_device_stats *tg3_get_stats(struct net_device *);
6999 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7000
7001 static int tg3_close(struct net_device *dev)
7002 {
7003         struct tg3 *tp = netdev_priv(dev);
7004
7005         /* Calling flush_scheduled_work() may deadlock because
7006          * linkwatch_event() may be on the workqueue and will try to take
7007          * the rtnl_lock, which we are holding.
7008          */
7009         while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7010                 msleep(1);
7011
7012         netif_stop_queue(dev);
7013
7014         del_timer_sync(&tp->timer);
7015
7016         tg3_full_lock(tp, 1);
7017 #if 0
7018         tg3_dump_state(tp);
7019 #endif
7020
7021         tg3_disable_ints(tp);
7022
7023         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7024         tg3_free_rings(tp);
7025         tp->tg3_flags &=
7026                 ~(TG3_FLAG_INIT_COMPLETE |
7027                   TG3_FLAG_GOT_SERDES_FLOWCTL);
7028
7029         tg3_full_unlock(tp);
7030
7031         free_irq(tp->pdev->irq, dev);
7032         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7033                 pci_disable_msi(tp->pdev);
7034                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7035         }
7036
7037         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7038                sizeof(tp->net_stats_prev));
7039         memcpy(&tp->estats_prev, tg3_get_estats(tp),
7040                sizeof(tp->estats_prev));
7041
7042         tg3_free_consistent(tp);
7043
7044         tg3_set_power_state(tp, PCI_D3hot);
7045
7046         netif_carrier_off(tp->dev);
7047
7048         return 0;
7049 }
7050
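/* Hardware statistics are kept as 64-bit values split into high/low 32-bit
 * halves.  On 32-bit hosts only the low half is returned, since unsigned
 * long cannot hold more; on 64-bit hosts the two halves are recombined.
 */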
7051 static inline unsigned long get_stat64(tg3_stat64_t *val)
7052 {
7053         unsigned long ret;
7054
7055 #if (BITS_PER_LONG == 32)
7056         ret = val->low;
7057 #else
7058         ret = ((u64)val->high << 32) | ((u64)val->low);
7059 #endif
7060         return ret;
7061 }
7062
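/* On 5700/5701 with a copper PHY the CRC error count is read out of the PHY
 * itself via the 0x1e/0x14 register sequence (the same sequence tg3_reset_hw()
 * uses to clear the counter) and the driver accumulates successive reads in
 * tp->phy_crc_errors, which implies the PHY counter restarts after each read.
 * All other chips report rx_fcs_errors from the MAC statistics block.
 */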
7063 static unsigned long calc_crc_errors(struct tg3 *tp)
7064 {
7065         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7066
7067         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7068             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7069              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7070                 u32 val;
7071
7072                 spin_lock_bh(&tp->lock);
7073                 if (!tg3_readphy(tp, 0x1e, &val)) {
7074                         tg3_writephy(tp, 0x1e, val | 0x8000);
7075                         tg3_readphy(tp, 0x14, &val);
7076                 } else
7077                         val = 0;
7078                 spin_unlock_bh(&tp->lock);
7079
7080                 tp->phy_crc_errors += val;
7081
7082                 return tp->phy_crc_errors;
7083         }
7084
7085         return get_stat64(&hw_stats->rx_fcs_errors);
7086 }
7087
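/* Ethtool statistics are kept monotonically increasing across close/open
 * cycles: tg3_close() snapshots the running totals into tp->estats_prev,
 * and ESTAT_ADD reports that snapshot plus whatever the hardware statistics
 * block has counted since the device was reopened.
 */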
7088 #define ESTAT_ADD(member) \
7089         estats->member =        old_estats->member + \
7090                                 get_stat64(&hw_stats->member)
7091
7092 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7093 {
7094         struct tg3_ethtool_stats *estats = &tp->estats;
7095         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7096         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7097
7098         if (!hw_stats)
7099                 return old_estats;
7100
7101         ESTAT_ADD(rx_octets);
7102         ESTAT_ADD(rx_fragments);
7103         ESTAT_ADD(rx_ucast_packets);
7104         ESTAT_ADD(rx_mcast_packets);
7105         ESTAT_ADD(rx_bcast_packets);
7106         ESTAT_ADD(rx_fcs_errors);
7107         ESTAT_ADD(rx_align_errors);
7108         ESTAT_ADD(rx_xon_pause_rcvd);
7109         ESTAT_ADD(rx_xoff_pause_rcvd);
7110         ESTAT_ADD(rx_mac_ctrl_rcvd);
7111         ESTAT_ADD(rx_xoff_entered);
7112         ESTAT_ADD(rx_frame_too_long_errors);
7113         ESTAT_ADD(rx_jabbers);
7114         ESTAT_ADD(rx_undersize_packets);
7115         ESTAT_ADD(rx_in_length_errors);
7116         ESTAT_ADD(rx_out_length_errors);
7117         ESTAT_ADD(rx_64_or_less_octet_packets);
7118         ESTAT_ADD(rx_65_to_127_octet_packets);
7119         ESTAT_ADD(rx_128_to_255_octet_packets);
7120         ESTAT_ADD(rx_256_to_511_octet_packets);
7121         ESTAT_ADD(rx_512_to_1023_octet_packets);
7122         ESTAT_ADD(rx_1024_to_1522_octet_packets);
7123         ESTAT_ADD(rx_1523_to_2047_octet_packets);
7124         ESTAT_ADD(rx_2048_to_4095_octet_packets);
7125         ESTAT_ADD(rx_4096_to_8191_octet_packets);
7126         ESTAT_ADD(rx_8192_to_9022_octet_packets);
7127
7128         ESTAT_ADD(tx_octets);
7129         ESTAT_ADD(tx_collisions);
7130         ESTAT_ADD(tx_xon_sent);
7131         ESTAT_ADD(tx_xoff_sent);
7132         ESTAT_ADD(tx_flow_control);
7133         ESTAT_ADD(tx_mac_errors);
7134         ESTAT_ADD(tx_single_collisions);
7135         ESTAT_ADD(tx_mult_collisions);
7136         ESTAT_ADD(tx_deferred);
7137         ESTAT_ADD(tx_excessive_collisions);
7138         ESTAT_ADD(tx_late_collisions);
7139         ESTAT_ADD(tx_collide_2times);
7140         ESTAT_ADD(tx_collide_3times);
7141         ESTAT_ADD(tx_collide_4times);
7142         ESTAT_ADD(tx_collide_5times);
7143         ESTAT_ADD(tx_collide_6times);
7144         ESTAT_ADD(tx_collide_7times);
7145         ESTAT_ADD(tx_collide_8times);
7146         ESTAT_ADD(tx_collide_9times);
7147         ESTAT_ADD(tx_collide_10times);
7148         ESTAT_ADD(tx_collide_11times);
7149         ESTAT_ADD(tx_collide_12times);
7150         ESTAT_ADD(tx_collide_13times);
7151         ESTAT_ADD(tx_collide_14times);
7152         ESTAT_ADD(tx_collide_15times);
7153         ESTAT_ADD(tx_ucast_packets);
7154         ESTAT_ADD(tx_mcast_packets);
7155         ESTAT_ADD(tx_bcast_packets);
7156         ESTAT_ADD(tx_carrier_sense_errors);
7157         ESTAT_ADD(tx_discards);
7158         ESTAT_ADD(tx_errors);
7159
7160         ESTAT_ADD(dma_writeq_full);
7161         ESTAT_ADD(dma_write_prioq_full);
7162         ESTAT_ADD(rxbds_empty);
7163         ESTAT_ADD(rx_discards);
7164         ESTAT_ADD(rx_errors);
7165         ESTAT_ADD(rx_threshold_hit);
7166
7167         ESTAT_ADD(dma_readq_full);
7168         ESTAT_ADD(dma_read_prioq_full);
7169         ESTAT_ADD(tx_comp_queue_full);
7170
7171         ESTAT_ADD(ring_set_send_prod_index);
7172         ESTAT_ADD(ring_status_update);
7173         ESTAT_ADD(nic_irqs);
7174         ESTAT_ADD(nic_avoided_irqs);
7175         ESTAT_ADD(nic_tx_threshold_hit);
7176
7177         return estats;
7178 }
7179
7180 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7181 {
7182         struct tg3 *tp = netdev_priv(dev);
7183         struct net_device_stats *stats = &tp->net_stats;
7184         struct net_device_stats *old_stats = &tp->net_stats_prev;
7185         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7186
7187         if (!hw_stats)
7188                 return old_stats;
7189
7190         stats->rx_packets = old_stats->rx_packets +
7191                 get_stat64(&hw_stats->rx_ucast_packets) +
7192                 get_stat64(&hw_stats->rx_mcast_packets) +
7193                 get_stat64(&hw_stats->rx_bcast_packets);
7194                 
7195         stats->tx_packets = old_stats->tx_packets +
7196                 get_stat64(&hw_stats->tx_ucast_packets) +
7197                 get_stat64(&hw_stats->tx_mcast_packets) +
7198                 get_stat64(&hw_stats->tx_bcast_packets);
7199
7200         stats->rx_bytes = old_stats->rx_bytes +
7201                 get_stat64(&hw_stats->rx_octets);
7202         stats->tx_bytes = old_stats->tx_bytes +
7203                 get_stat64(&hw_stats->tx_octets);
7204
7205         stats->rx_errors = old_stats->rx_errors +
7206                 get_stat64(&hw_stats->rx_errors);
7207         stats->tx_errors = old_stats->tx_errors +
7208                 get_stat64(&hw_stats->tx_errors) +
7209                 get_stat64(&hw_stats->tx_mac_errors) +
7210                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7211                 get_stat64(&hw_stats->tx_discards);
7212
7213         stats->multicast = old_stats->multicast +
7214                 get_stat64(&hw_stats->rx_mcast_packets);
7215         stats->collisions = old_stats->collisions +
7216                 get_stat64(&hw_stats->tx_collisions);
7217
7218         stats->rx_length_errors = old_stats->rx_length_errors +
7219                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7220                 get_stat64(&hw_stats->rx_undersize_packets);
7221
7222         stats->rx_over_errors = old_stats->rx_over_errors +
7223                 get_stat64(&hw_stats->rxbds_empty);
7224         stats->rx_frame_errors = old_stats->rx_frame_errors +
7225                 get_stat64(&hw_stats->rx_align_errors);
7226         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7227                 get_stat64(&hw_stats->tx_discards);
7228         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7229                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7230
7231         stats->rx_crc_errors = old_stats->rx_crc_errors +
7232                 calc_crc_errors(tp);
7233
7234         stats->rx_missed_errors = old_stats->rx_missed_errors +
7235                 get_stat64(&hw_stats->rx_discards);
7236
7237         return stats;
7238 }
7239
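/* Bit-serial CRC-32 over the multicast address, using the reflected
 * Ethernet polynomial 0xedb88320.  Only seven bits of the result are used
 * by the hash filter below, which is presumably why a simple bit-at-a-time
 * loop was preferred over a lookup table here.
 */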
7240 static inline u32 calc_crc(unsigned char *buf, int len)
7241 {
7242         u32 reg;
7243         u32 tmp;
7244         int j, k;
7245
7246         reg = 0xffffffff;
7247
7248         for (j = 0; j < len; j++) {
7249                 reg ^= buf[j];
7250
7251                 for (k = 0; k < 8; k++) {
7252                         tmp = reg & 0x01;
7253
7254                         reg >>= 1;
7255
7256                         if (tmp) {
7257                                 reg ^= 0xedb88320;
7258                         }
7259                 }
7260         }
7261
7262         return ~reg;
7263 }
7264
7265 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7266 {
7267         /* accept or reject all multicast frames */
7268         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7269         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7270         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7271         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7272 }
7273
7274 static void __tg3_set_rx_mode(struct net_device *dev)
7275 {
7276         struct tg3 *tp = netdev_priv(dev);
7277         u32 rx_mode;
7278
7279         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7280                                   RX_MODE_KEEP_VLAN_TAG);
7281
7282         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7283          * flag clear.
7284          */
7285 #if TG3_VLAN_TAG_USED
7286         if (!tp->vlgrp &&
7287             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7288                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7289 #else
7290         /* By definition, VLAN is always disabled in this
7291          * case.
7292          */
7293         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7294                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7295 #endif
7296
7297         if (dev->flags & IFF_PROMISC) {
7298                 /* Promiscuous mode. */
7299                 rx_mode |= RX_MODE_PROMISC;
7300         } else if (dev->flags & IFF_ALLMULTI) {
7301                 /* Accept all multicast. */
7302                 tg3_set_multi(tp, 1);
7303         } else if (dev->mc_count < 1) {
7304                 /* Reject all multicast. */
7305                 tg3_set_multi(tp, 0);
7306         } else {
7307                 /* Accept one or more multicast(s). */
7308                 struct dev_mc_list *mclist;
7309                 unsigned int i;
7310                 u32 mc_filter[4] = { 0, };
7311                 u32 regidx;
7312                 u32 bit;
7313                 u32 crc;
7314
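                 /* Each address hashes to one of 128 bits spread across the
                  * four MAC_HASH_REG_* registers: the low seven bits of the
                  * inverted CRC select the bit, the top two of those seven
                  * pick the register and the low five pick the bit position
                  * within it.
                  */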
7315                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7316                      i++, mclist = mclist->next) {
7317
7318                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
7319                         bit = ~crc & 0x7f;
7320                         regidx = (bit & 0x60) >> 5;
7321                         bit &= 0x1f;
7322                         mc_filter[regidx] |= (1 << bit);
7323                 }
7324
7325                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7326                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7327                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7328                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7329         }
7330
7331         if (rx_mode != tp->rx_mode) {
7332                 tp->rx_mode = rx_mode;
7333                 tw32_f(MAC_RX_MODE, rx_mode);
7334                 udelay(10);
7335         }
7336 }
7337
7338 static void tg3_set_rx_mode(struct net_device *dev)
7339 {
7340         struct tg3 *tp = netdev_priv(dev);
7341
7342         if (!netif_running(dev))
7343                 return;
7344
7345         tg3_full_lock(tp, 0);
7346         __tg3_set_rx_mode(dev);
7347         tg3_full_unlock(tp);
7348 }
7349
7350 #define TG3_REGDUMP_LEN         (32 * 1024)
7351
7352 static int tg3_get_regs_len(struct net_device *dev)
7353 {
7354         return TG3_REGDUMP_LEN;
7355 }
7356
7357 static void tg3_get_regs(struct net_device *dev,
7358                 struct ethtool_regs *regs, void *_p)
7359 {
7360         u32 *p = _p;
7361         struct tg3 *tp = netdev_priv(dev);
7362         u8 *orig_p = _p;
7363         int i;
7364
7365         regs->version = 0;
7366
7367         memset(p, 0, TG3_REGDUMP_LEN);
7368
7369         if (tp->link_config.phy_is_low_power)
7370                 return;
7371
7372         tg3_full_lock(tp, 0);
7373
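         /* The dump is laid out so that each register value lands at its own
          * register offset within the 32kB buffer (p is repointed to
          * orig_p + base before every block); the gaps between dumped blocks
          * stay zero from the memset() above.
          */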
7374 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7375 #define GET_REG32_LOOP(base,len)                \
7376 do {    p = (u32 *)(orig_p + (base));           \
7377         for (i = 0; i < len; i += 4)            \
7378                 __GET_REG32((base) + i);        \
7379 } while (0)
7380 #define GET_REG32_1(reg)                        \
7381 do {    p = (u32 *)(orig_p + (reg));            \
7382         __GET_REG32((reg));                     \
7383 } while (0)
7384
7385         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7386         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7387         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7388         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7389         GET_REG32_1(SNDDATAC_MODE);
7390         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7391         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7392         GET_REG32_1(SNDBDC_MODE);
7393         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7394         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7395         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7396         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7397         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7398         GET_REG32_1(RCVDCC_MODE);
7399         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7400         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7401         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7402         GET_REG32_1(MBFREE_MODE);
7403         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7404         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7405         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7406         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7407         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7408         GET_REG32_1(RX_CPU_MODE);
7409         GET_REG32_1(RX_CPU_STATE);
7410         GET_REG32_1(RX_CPU_PGMCTR);
7411         GET_REG32_1(RX_CPU_HWBKPT);
7412         GET_REG32_1(TX_CPU_MODE);
7413         GET_REG32_1(TX_CPU_STATE);
7414         GET_REG32_1(TX_CPU_PGMCTR);
7415         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7416         GET_REG32_LOOP(FTQ_RESET, 0x120);
7417         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7418         GET_REG32_1(DMAC_MODE);
7419         GET_REG32_LOOP(GRC_MODE, 0x4c);
7420         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7421                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7422
7423 #undef __GET_REG32
7424 #undef GET_REG32_LOOP
7425 #undef GET_REG32_1
7426
7427         tg3_full_unlock(tp);
7428 }
7429
7430 static int tg3_get_eeprom_len(struct net_device *dev)
7431 {
7432         struct tg3 *tp = netdev_priv(dev);
7433
7434         return tp->nvram_size;
7435 }
7436
7437 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7438
7439 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7440 {
7441         struct tg3 *tp = netdev_priv(dev);
7442         int ret;
7443         u8  *pd;
7444         u32 i, offset, len, val, b_offset, b_count;
7445
7446         if (tp->link_config.phy_is_low_power)
7447                 return -EAGAIN;
7448
7449         offset = eeprom->offset;
7450         len = eeprom->len;
7451         eeprom->len = 0;
7452
7453         eeprom->magic = TG3_EEPROM_MAGIC;
7454
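         /* NVRAM is accessed a 32-bit word at a time, so an arbitrary
          * request is handled in three phases: a leading partial word to
          * reach 4-byte alignment, the aligned middle, and a trailing
          * partial word.  Each word is passed through cpu_to_le32() before
          * the requested bytes are copied out.
          */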
7455         if (offset & 3) {
7456                 /* adjustments to start on required 4 byte boundary */
7457                 b_offset = offset & 3;
7458                 b_count = 4 - b_offset;
7459                 if (b_count > len) {
7460                         /* i.e. offset=1 len=2 */
7461                         b_count = len;
7462                 }
7463                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7464                 if (ret)
7465                         return ret;
7466                 val = cpu_to_le32(val);
7467                 memcpy(data, ((char*)&val) + b_offset, b_count);
7468                 len -= b_count;
7469                 offset += b_count;
7470                 eeprom->len += b_count;
7471         }
7472
7473         /* read bytes up to the last 4 byte boundary */
7474         pd = &data[eeprom->len];
7475         for (i = 0; i < (len - (len & 3)); i += 4) {
7476                 ret = tg3_nvram_read(tp, offset + i, &val);
7477                 if (ret) {
7478                         eeprom->len += i;
7479                         return ret;
7480                 }
7481                 val = cpu_to_le32(val);
7482                 memcpy(pd + i, &val, 4);
7483         }
7484         eeprom->len += i;
7485
7486         if (len & 3) {
7487                 /* read last bytes not ending on 4 byte boundary */
7488                 pd = &data[eeprom->len];
7489                 b_count = len & 3;
7490                 b_offset = offset + len - b_count;
7491                 ret = tg3_nvram_read(tp, b_offset, &val);
7492                 if (ret)
7493                         return ret;
7494                 val = cpu_to_le32(val);
7495                 memcpy(pd, ((char*)&val), b_count);
7496                 eeprom->len += b_count;
7497         }
7498         return 0;
7499 }
7500
7501 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7502
7503 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7504 {
7505         struct tg3 *tp = netdev_priv(dev);
7506         int ret;
7507         u32 offset, len, b_offset, odd_len, start, end;
7508         u8 *buf;
7509
7510         if (tp->link_config.phy_is_low_power)
7511                 return -EAGAIN;
7512
7513         if (eeprom->magic != TG3_EEPROM_MAGIC)
7514                 return -EINVAL;
7515
7516         offset = eeprom->offset;
7517         len = eeprom->len;
7518
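         /* Writes have the same 32-bit granularity: if the request does not
          * start and end on word boundaries, the boundary words are read
          * back first so their untouched bytes can be merged into a scratch
          * buffer, effectively a read-modify-write of the edges.
          */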
7519         if ((b_offset = (offset & 3))) {
7520                 /* adjustments to start on required 4 byte boundary */
7521                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7522                 if (ret)
7523                         return ret;
7524                 start = cpu_to_le32(start);
7525                 len += b_offset;
7526                 offset &= ~3;
7527                 if (len < 4)
7528                         len = 4;
7529         }
7530
7531         odd_len = 0;
7532         if (len & 3) {
7533                 /* adjustments to end on required 4 byte boundary */
7534                 odd_len = 1;
7535                 len = (len + 3) & ~3;
7536                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7537                 if (ret)
7538                         return ret;
7539                 end = cpu_to_le32(end);
7540         }
7541
7542         buf = data;
7543         if (b_offset || odd_len) {
7544                 buf = kmalloc(len, GFP_KERNEL);
7545                 if (!buf)
7546                         return -ENOMEM;
7547                 if (b_offset)
7548                         memcpy(buf, &start, 4);
7549                 if (odd_len)
7550                         memcpy(buf+len-4, &end, 4);
7551                 memcpy(buf + b_offset, data, eeprom->len);
7552         }
7553
7554         ret = tg3_nvram_write_block(tp, offset, len, buf);
7555
7556         if (buf != data)
7557                 kfree(buf);
7558
7559         return ret;
7560 }
7561
7562 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7563 {
7564         struct tg3 *tp = netdev_priv(dev);
7565   
7566         cmd->supported = (SUPPORTED_Autoneg);
7567
7568         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7569                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7570                                    SUPPORTED_1000baseT_Full);
7571
7572         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
7573                 cmd->supported |= (SUPPORTED_100baseT_Half |
7574                                   SUPPORTED_100baseT_Full |
7575                                   SUPPORTED_10baseT_Half |
7576                                   SUPPORTED_10baseT_Full |
7577                                   SUPPORTED_MII);
7578         else
7579                 cmd->supported |= SUPPORTED_FIBRE;
7580   
7581         cmd->advertising = tp->link_config.advertising;
7582         if (netif_running(dev)) {
7583                 cmd->speed = tp->link_config.active_speed;
7584                 cmd->duplex = tp->link_config.active_duplex;
7585         }
7586         cmd->port = 0;
7587         cmd->phy_address = PHY_ADDR;
7588         cmd->transceiver = 0;
7589         cmd->autoneg = tp->link_config.autoneg;
7590         cmd->maxtxpkt = 0;
7591         cmd->maxrxpkt = 0;
7592         return 0;
7593 }
7594   
7595 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7596 {
7597         struct tg3 *tp = netdev_priv(dev);
7598   
7599         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7600                 /* These are the only advertisement bits allowed.  */
7601                 if (cmd->autoneg == AUTONEG_ENABLE &&
7602                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7603                                           ADVERTISED_1000baseT_Full |
7604                                           ADVERTISED_Autoneg |
7605                                           ADVERTISED_FIBRE)))
7606                         return -EINVAL;
7607                 /* Fiber can only do SPEED_1000.  */
7608                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7609                          (cmd->speed != SPEED_1000))
7610                         return -EINVAL;
7611         /* Copper cannot force SPEED_1000.  */
7612         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7613                    (cmd->speed == SPEED_1000))
7614                 return -EINVAL;
7615         else if ((cmd->speed == SPEED_1000) &&
7616                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7617                 return -EINVAL;
7618
7619         tg3_full_lock(tp, 0);
7620
7621         tp->link_config.autoneg = cmd->autoneg;
7622         if (cmd->autoneg == AUTONEG_ENABLE) {
7623                 tp->link_config.advertising = cmd->advertising;
7624                 tp->link_config.speed = SPEED_INVALID;
7625                 tp->link_config.duplex = DUPLEX_INVALID;
7626         } else {
7627                 tp->link_config.advertising = 0;
7628                 tp->link_config.speed = cmd->speed;
7629                 tp->link_config.duplex = cmd->duplex;
7630         }
7631   
7632         if (netif_running(dev))
7633                 tg3_setup_phy(tp, 1);
7634
7635         tg3_full_unlock(tp);
7636   
7637         return 0;
7638 }
7639   
7640 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7641 {
7642         struct tg3 *tp = netdev_priv(dev);
7643   
7644         strcpy(info->driver, DRV_MODULE_NAME);
7645         strcpy(info->version, DRV_MODULE_VERSION);
7646         strcpy(info->bus_info, pci_name(tp->pdev));
7647 }
7648   
7649 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7650 {
7651         struct tg3 *tp = netdev_priv(dev);
7652   
7653         wol->supported = WAKE_MAGIC;
7654         wol->wolopts = 0;
7655         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7656                 wol->wolopts = WAKE_MAGIC;
7657         memset(&wol->sopass, 0, sizeof(wol->sopass));
7658 }
7659   
7660 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7661 {
7662         struct tg3 *tp = netdev_priv(dev);
7663   
7664         if (wol->wolopts & ~WAKE_MAGIC)
7665                 return -EINVAL;
7666         if ((wol->wolopts & WAKE_MAGIC) &&
7667             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7668             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7669                 return -EINVAL;
7670   
7671         spin_lock_bh(&tp->lock);
7672         if (wol->wolopts & WAKE_MAGIC)
7673                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7674         else
7675                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7676         spin_unlock_bh(&tp->lock);
7677   
7678         return 0;
7679 }
7680   
7681 static u32 tg3_get_msglevel(struct net_device *dev)
7682 {
7683         struct tg3 *tp = netdev_priv(dev);
7684         return tp->msg_enable;
7685 }
7686   
7687 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7688 {
7689         struct tg3 *tp = netdev_priv(dev);
7690         tp->msg_enable = value;
7691 }
7692   
7693 #if TG3_TSO_SUPPORT != 0
7694 static int tg3_set_tso(struct net_device *dev, u32 value)
7695 {
7696         struct tg3 *tp = netdev_priv(dev);
7697
7698         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7699                 if (value)
7700                         return -EINVAL;
7701                 return 0;
7702         }
7703         return ethtool_op_set_tso(dev, value);
7704 }
7705 #endif
7706   
7707 static int tg3_nway_reset(struct net_device *dev)
7708 {
7709         struct tg3 *tp = netdev_priv(dev);
7710         u32 bmcr;
7711         int r;
7712   
7713         if (!netif_running(dev))
7714                 return -EAGAIN;
7715
7716         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7717                 return -EINVAL;
7718
7719         spin_lock_bh(&tp->lock);
7720         r = -EINVAL;
7721         tg3_readphy(tp, MII_BMCR, &bmcr);
7722         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7723             ((bmcr & BMCR_ANENABLE) ||
7724              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7725                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7726                                            BMCR_ANENABLE);
7727                 r = 0;
7728         }
7729         spin_unlock_bh(&tp->lock);
7730   
7731         return r;
7732 }
7733   
7734 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7735 {
7736         struct tg3 *tp = netdev_priv(dev);
7737   
7738         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7739         ering->rx_mini_max_pending = 0;
7740         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7741                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7742         else
7743                 ering->rx_jumbo_max_pending = 0;
7744
7745         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7746
7747         ering->rx_pending = tp->rx_pending;
7748         ering->rx_mini_pending = 0;
7749         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7750                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7751         else
7752                 ering->rx_jumbo_pending = 0;
7753
7754         ering->tx_pending = tp->tx_pending;
7755 }
7756   
7757 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7758 {
7759         struct tg3 *tp = netdev_priv(dev);
7760         int irq_sync = 0;
7761   
7762         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7763             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7764             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7765                 return -EINVAL;
7766   
7767         if (netif_running(dev)) {
7768                 tg3_netif_stop(tp);
7769                 irq_sync = 1;
7770         }
7771
7772         tg3_full_lock(tp, irq_sync);
7773   
7774         tp->rx_pending = ering->rx_pending;
7775
7776         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7777             tp->rx_pending > 63)
7778                 tp->rx_pending = 63;
7779         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7780         tp->tx_pending = ering->tx_pending;
7781
7782         if (netif_running(dev)) {
7783                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7784                 tg3_init_hw(tp);
7785                 tg3_netif_start(tp);
7786         }
7787
7788         tg3_full_unlock(tp);
7789   
7790         return 0;
7791 }
7792   
7793 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7794 {
7795         struct tg3 *tp = netdev_priv(dev);
7796   
7797         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7798         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7799         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7800 }
7801   
7802 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7803 {
7804         struct tg3 *tp = netdev_priv(dev);
7805         int irq_sync = 0;
7806   
7807         if (netif_running(dev)) {
7808                 tg3_netif_stop(tp);
7809                 irq_sync = 1;
7810         }
7811
7812         tg3_full_lock(tp, irq_sync);
7813
7814         if (epause->autoneg)
7815                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7816         else
7817                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7818         if (epause->rx_pause)
7819                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7820         else
7821                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7822         if (epause->tx_pause)
7823                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7824         else
7825                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7826
7827         if (netif_running(dev)) {
7828                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7829                 tg3_init_hw(tp);
7830                 tg3_netif_start(tp);
7831         }
7832
7833         tg3_full_unlock(tp);
7834   
7835         return 0;
7836 }
7837   
7838 static u32 tg3_get_rx_csum(struct net_device *dev)
7839 {
7840         struct tg3 *tp = netdev_priv(dev);
7841         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7842 }
7843   
7844 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7845 {
7846         struct tg3 *tp = netdev_priv(dev);
7847   
7848         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7849                 if (data != 0)
7850                         return -EINVAL;
7851                 return 0;
7852         }
7853   
7854         spin_lock_bh(&tp->lock);
7855         if (data)
7856                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7857         else
7858                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7859         spin_unlock_bh(&tp->lock);
7860   
7861         return 0;
7862 }
7863   
7864 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7865 {
7866         struct tg3 *tp = netdev_priv(dev);
7867   
7868         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7869                 if (data != 0)
7870                         return -EINVAL;
7871                 return 0;
7872         }
7873   
7874         if (data)
7875                 dev->features |= NETIF_F_IP_CSUM;
7876         else
7877                 dev->features &= ~NETIF_F_IP_CSUM;
7878
7879         return 0;
7880 }
7881
7882 static int tg3_get_stats_count (struct net_device *dev)
7883 {
7884         return TG3_NUM_STATS;
7885 }
7886
7887 static int tg3_get_test_count (struct net_device *dev)
7888 {
7889         return TG3_NUM_TEST;
7890 }
7891
7892 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7893 {
7894         switch (stringset) {
7895         case ETH_SS_STATS:
7896                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7897                 break;
7898         case ETH_SS_TEST:
7899                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7900                 break;
7901         default:
7902                 WARN_ON(1);     /* we need a WARN() */
7903                 break;
7904         }
7905 }
7906
7907 static int tg3_phys_id(struct net_device *dev, u32 data)
7908 {
7909         struct tg3 *tp = netdev_priv(dev);
7910         int i;
7911
7912         if (!netif_running(tp->dev))
7913                 return -EAGAIN;
7914
7915         if (data == 0)
7916                 data = 2;
7917
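        /* Blink for roughly 'data' seconds (default 2): even iterations force
         * all link/speed/traffic LEDs on, odd iterations force them off, with
         * a 500ms sleep in between; the saved LED control value is restored
         * at the end.
         */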
7918         for (i = 0; i < (data * 2); i++) {
7919                 if ((i % 2) == 0)
7920                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7921                                            LED_CTRL_1000MBPS_ON |
7922                                            LED_CTRL_100MBPS_ON |
7923                                            LED_CTRL_10MBPS_ON |
7924                                            LED_CTRL_TRAFFIC_OVERRIDE |
7925                                            LED_CTRL_TRAFFIC_BLINK |
7926                                            LED_CTRL_TRAFFIC_LED);
7928                 else
7929                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7930                                            LED_CTRL_TRAFFIC_OVERRIDE);
7931
7932                 if (msleep_interruptible(500))
7933                         break;
7934         }
7935         tw32(MAC_LED_CTRL, tp->led_ctrl);
7936         return 0;
7937 }
7938
7939 static void tg3_get_ethtool_stats (struct net_device *dev,
7940                                    struct ethtool_stats *estats, u64 *tmp_stats)
7941 {
7942         struct tg3 *tp = netdev_priv(dev);
7943         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7944 }
7945
7946 #define NVRAM_TEST_SIZE 0x100
7947 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
7948
7949 static int tg3_test_nvram(struct tg3 *tp)
7950 {
7951         u32 *buf, csum, magic;
7952         int i, j, err = 0, size;
7953
7954         if (tg3_nvram_read(tp, 0, &magic) != 0)
7955                 return -EIO;
7956
7957         magic = swab32(magic);
7958         if (magic == TG3_EEPROM_MAGIC)
7959                 size = NVRAM_TEST_SIZE;
7960         else if ((magic & 0xff000000) == 0xa5000000) {
7961                 if ((magic & 0xe00000) == 0x200000)
7962                         size = NVRAM_SELFBOOT_FORMAT1_SIZE;
7963                 else
7964                         return 0;
7965         } else
7966                 return -EIO;
7967
7968         buf = kmalloc(size, GFP_KERNEL);
7969         if (buf == NULL)
7970                 return -ENOMEM;
7971
7972         err = -EIO;
7973         for (i = 0, j = 0; i < size; i += 4, j++) {
7974                 u32 val;
7975
7976                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7977                         break;
7978                 buf[j] = cpu_to_le32(val);
7979         }
7980         if (i < size)
7981                 goto out;
7982
7983         /* Selfboot format */
7984         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
7985                 u8 *buf8 = (u8 *) buf, csum8 = 0;
7986
7987                 for (i = 0; i < size; i++)
7988                         csum8 += buf8[i];
7989
7990                 if (csum8 == 0)
7991                         return 0;
7992                 return -EIO;
7993         }
7994
7995         /* Bootstrap checksum at offset 0x10 */
7996         csum = calc_crc((unsigned char *) buf, 0x10);
7997         if (csum != cpu_to_le32(buf[0x10/4]))
7998                 goto out;
7999
8000         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8001         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8002         if (csum != cpu_to_le32(buf[0xfc/4]))
8003                 goto out;
8004
8005         err = 0;
8006
8007 out:
8008         kfree(buf);
8009         return err;
8010 }
8011
8012 #define TG3_SERDES_TIMEOUT_SEC  2
8013 #define TG3_COPPER_TIMEOUT_SEC  6
8014
8015 static int tg3_test_link(struct tg3 *tp)
8016 {
8017         int i, max;
8018
8019         if (!netif_running(tp->dev))
8020                 return -ENODEV;
8021
8022         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8023                 max = TG3_SERDES_TIMEOUT_SEC;
8024         else
8025                 max = TG3_COPPER_TIMEOUT_SEC;
8026
8027         for (i = 0; i < max; i++) {
8028                 if (netif_carrier_ok(tp->dev))
8029                         return 0;
8030
8031                 if (msleep_interruptible(1000))
8032                         break;
8033         }
8034
8035         return -EIO;
8036 }
8037
8038 /* Only test the commonly used registers */
8039 static int tg3_test_registers(struct tg3 *tp)
8040 {
8041         int i, is_5705;
8042         u32 offset, read_mask, write_mask, val, save_val, read_val;
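        /* Each entry names a register offset, the chips it applies to, a mask
         * of its read-only bits and a mask of its read/write bits; the table
         * is terminated by the 0xffff offset.
         */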
8043         static struct {
8044                 u16 offset;
8045                 u16 flags;
8046 #define TG3_FL_5705     0x1
8047 #define TG3_FL_NOT_5705 0x2
8048 #define TG3_FL_NOT_5788 0x4
8049                 u32 read_mask;
8050                 u32 write_mask;
8051         } reg_tbl[] = {
8052                 /* MAC Control Registers */
8053                 { MAC_MODE, TG3_FL_NOT_5705,
8054                         0x00000000, 0x00ef6f8c },
8055                 { MAC_MODE, TG3_FL_5705,
8056                         0x00000000, 0x01ef6b8c },
8057                 { MAC_STATUS, TG3_FL_NOT_5705,
8058                         0x03800107, 0x00000000 },
8059                 { MAC_STATUS, TG3_FL_5705,
8060                         0x03800100, 0x00000000 },
8061                 { MAC_ADDR_0_HIGH, 0x0000,
8062                         0x00000000, 0x0000ffff },
8063                 { MAC_ADDR_0_LOW, 0x0000,
8064                         0x00000000, 0xffffffff },
8065                 { MAC_RX_MTU_SIZE, 0x0000,
8066                         0x00000000, 0x0000ffff },
8067                 { MAC_TX_MODE, 0x0000,
8068                         0x00000000, 0x00000070 },
8069                 { MAC_TX_LENGTHS, 0x0000,
8070                         0x00000000, 0x00003fff },
8071                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8072                         0x00000000, 0x000007fc },
8073                 { MAC_RX_MODE, TG3_FL_5705,
8074                         0x00000000, 0x000007dc },
8075                 { MAC_HASH_REG_0, 0x0000,
8076                         0x00000000, 0xffffffff },
8077                 { MAC_HASH_REG_1, 0x0000,
8078                         0x00000000, 0xffffffff },
8079                 { MAC_HASH_REG_2, 0x0000,
8080                         0x00000000, 0xffffffff },
8081                 { MAC_HASH_REG_3, 0x0000,
8082                         0x00000000, 0xffffffff },
8083
8084                 /* Receive Data and Receive BD Initiator Control Registers. */
8085                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8086                         0x00000000, 0xffffffff },
8087                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8088                         0x00000000, 0xffffffff },
8089                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8090                         0x00000000, 0x00000003 },
8091                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8092                         0x00000000, 0xffffffff },
8093                 { RCVDBDI_STD_BD+0, 0x0000,
8094                         0x00000000, 0xffffffff },
8095                 { RCVDBDI_STD_BD+4, 0x0000,
8096                         0x00000000, 0xffffffff },
8097                 { RCVDBDI_STD_BD+8, 0x0000,
8098                         0x00000000, 0xffff0002 },
8099                 { RCVDBDI_STD_BD+0xc, 0x0000,
8100                         0x00000000, 0xffffffff },
8101         
8102                 /* Receive BD Initiator Control Registers. */
8103                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8104                         0x00000000, 0xffffffff },
8105                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8106                         0x00000000, 0x000003ff },
8107                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8108                         0x00000000, 0xffffffff },
8109         
8110                 /* Host Coalescing Control Registers. */
8111                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8112                         0x00000000, 0x00000004 },
8113                 { HOSTCC_MODE, TG3_FL_5705,
8114                         0x00000000, 0x000000f6 },
8115                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8116                         0x00000000, 0xffffffff },
8117                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8118                         0x00000000, 0x000003ff },
8119                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8120                         0x00000000, 0xffffffff },
8121                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8122                         0x00000000, 0x000003ff },
8123                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8124                         0x00000000, 0xffffffff },
8125                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8126                         0x00000000, 0x000000ff },
8127                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8128                         0x00000000, 0xffffffff },
8129                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8130                         0x00000000, 0x000000ff },
8131                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8132                         0x00000000, 0xffffffff },
8133                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8134                         0x00000000, 0xffffffff },
8135                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8136                         0x00000000, 0xffffffff },
8137                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8138                         0x00000000, 0x000000ff },
8139                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8140                         0x00000000, 0xffffffff },
8141                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8142                         0x00000000, 0x000000ff },
8143                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8144                         0x00000000, 0xffffffff },
8145                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8146                         0x00000000, 0xffffffff },
8147                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8148                         0x00000000, 0xffffffff },
8149                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8150                         0x00000000, 0xffffffff },
8151                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8152                         0x00000000, 0xffffffff },
8153                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8154                         0xffffffff, 0x00000000 },
8155                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8156                         0xffffffff, 0x00000000 },
8157
8158                 /* Buffer Manager Control Registers. */
8159                 { BUFMGR_MB_POOL_ADDR, 0x0000,
8160                         0x00000000, 0x007fff80 },
8161                 { BUFMGR_MB_POOL_SIZE, 0x0000,
8162                         0x00000000, 0x007fffff },
8163                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8164                         0x00000000, 0x0000003f },
8165                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8166                         0x00000000, 0x000001ff },
8167                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8168                         0x00000000, 0x000001ff },
8169                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8170                         0xffffffff, 0x00000000 },
8171                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8172                         0xffffffff, 0x00000000 },
8173         
8174                 /* Mailbox Registers */
8175                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8176                         0x00000000, 0x000001ff },
8177                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8178                         0x00000000, 0x000001ff },
8179                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8180                         0x00000000, 0x000007ff },
8181                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8182                         0x00000000, 0x000001ff },
8183
8184                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8185         };
8186
8187         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8188                 is_5705 = 1;
8189         else
8190                 is_5705 = 0;
8191
8192         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8193                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8194                         continue;
8195
8196                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8197                         continue;
8198
8199                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8200                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8201                         continue;
8202
8203                 offset = (u32) reg_tbl[i].offset;
8204                 read_mask = reg_tbl[i].read_mask;
8205                 write_mask = reg_tbl[i].write_mask;
8206
8207                 /* Save the original register content */
8208                 save_val = tr32(offset);
8209
8210                 /* Determine the read-only value. */
8211                 read_val = save_val & read_mask;
8212
8213                 /* Write zero to the register, then make sure the read-only bits
8214                  * are not changed and the read/write bits are all zeros.
8215                  */
8216                 tw32(offset, 0);
8217
8218                 val = tr32(offset);
8219
8220                 /* Test the read-only and read/write bits. */
8221                 if (((val & read_mask) != read_val) || (val & write_mask))
8222                         goto out;
8223
8224                 /* Write ones to all the bits defined by RdMask and WrMask, then
8225                  * make sure the read-only bits are not changed and the
8226                  * read/write bits are all ones.
8227                  */
8228                 tw32(offset, read_mask | write_mask);
8229
8230                 val = tr32(offset);
8231
8232                 /* Test the read-only bits. */
8233                 if ((val & read_mask) != read_val)
8234                         goto out;
8235
8236                 /* Test the read/write bits. */
8237                 if ((val & write_mask) != write_mask)
8238                         goto out;
8239
8240                 tw32(offset, save_val);
8241         }
8242
8243         return 0;
8244
8245 out:
8246         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8247         tw32(offset, save_val);
8248         return -EIO;
8249 }
8250
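/* Write each test pattern to every word in the [offset, offset + len) window
 * of NIC-internal memory and read it back to verify.
 */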
8251 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8252 {
8253         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8254         int i;
8255         u32 j;
8256
8257         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8258                 for (j = 0; j < len; j += 4) {
8259                         u32 val;
8260
8261                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8262                         tg3_read_mem(tp, offset + j, &val);
8263                         if (val != test_pattern[i])
8264                                 return -EIO;
8265                 }
8266         }
8267         return 0;
8268 }
8269
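/* Pattern-test the NIC's internal memory regions; the set of {offset, len}
 * ranges differs between the 570x and 5705-class chips.
 */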
8270 static int tg3_test_memory(struct tg3 *tp)
8271 {
8272         static struct mem_entry {
8273                 u32 offset;
8274                 u32 len;
8275         } mem_tbl_570x[] = {
8276                 { 0x00000000, 0x00b50},
8277                 { 0x00002000, 0x1c000},
8278                 { 0xffffffff, 0x00000}
8279         }, mem_tbl_5705[] = {
8280                 { 0x00000100, 0x0000c},
8281                 { 0x00000200, 0x00008},
8282                 { 0x00004000, 0x00800},
8283                 { 0x00006000, 0x01000},
8284                 { 0x00008000, 0x02000},
8285                 { 0x00010000, 0x0e000},
8286                 { 0xffffffff, 0x00000}
8287         };
8288         struct mem_entry *mem_tbl;
8289         int err = 0;
8290         int i;
8291
8292         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8293                 mem_tbl = mem_tbl_5705;
8294         else
8295                 mem_tbl = mem_tbl_570x;
8296
8297         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8298                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8299                     mem_tbl[i].len)) != 0)
8300                         break;
8301         }
8302         
8303         return err;
8304 }
8305
8306 #define TG3_MAC_LOOPBACK        0
8307 #define TG3_PHY_LOOPBACK        1
8308
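/* Send a single 1514-byte frame addressed to our own MAC with either the MAC
 * or the PHY placed in loopback, then verify that it comes back on the
 * standard RX ring with the expected length and payload.
 */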
8309 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8310 {
8311         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8312         u32 desc_idx;
8313         struct sk_buff *skb, *rx_skb;
8314         u8 *tx_data;
8315         dma_addr_t map;
8316         int num_pkts, tx_len, rx_len, i, err;
8317         struct tg3_rx_buffer_desc *desc;
8318
8319         if (loopback_mode == TG3_MAC_LOOPBACK) {
8320                 /* HW erratum: MAC loopback fails in some cases on the 5780.
8321                  * Normal traffic and PHY loopback are not affected by this
8322                  * erratum.
8323                  */
8324                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8325                         return 0;
8326
8327                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8328                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8329                            MAC_MODE_PORT_MODE_GMII;
8330                 tw32(MAC_MODE, mac_mode);
8331         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8332                 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8333                                            BMCR_SPEED1000);
8334                 udelay(40);
8335                 /* reset to prevent losing 1st rx packet intermittently */
8336                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8337                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8338                         udelay(10);
8339                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8340                 }
8341                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8342                            MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
8343                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
8344                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8345                 tw32(MAC_MODE, mac_mode);
8346         }
8347         else
8348                 return -EINVAL;
8349
8350         err = -EIO;
8351
8352         tx_len = 1514;
8353         skb = dev_alloc_skb(tx_len);
        if (!skb)
                return -ENOMEM;
8354         tx_data = skb_put(skb, tx_len);
8355         memcpy(tx_data, tp->dev->dev_addr, 6);
8356         memset(tx_data + 6, 0x0, 8);
8357
8358         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8359
8360         for (i = 14; i < tx_len; i++)
8361                 tx_data[i] = (u8) (i & 0xff);
8362
8363         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8364
8365         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8366              HOSTCC_MODE_NOW);
8367
8368         udelay(10);
8369
8370         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8371
8372         num_pkts = 0;
8373
8374         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8375
8376         tp->tx_prod++;
8377         num_pkts++;
8378
8379         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8380                      tp->tx_prod);
8381         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8382
8383         udelay(10);
8384
8385         for (i = 0; i < 10; i++) {
8386                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8387                        HOSTCC_MODE_NOW);
8388
8389                 udelay(10);
8390
8391                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8392                 rx_idx = tp->hw_status->idx[0].rx_producer;
8393                 if ((tx_idx == tp->tx_prod) &&
8394                     (rx_idx == (rx_start_idx + num_pkts)))
8395                         break;
8396         }
8397
8398         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8399         dev_kfree_skb(skb);
8400
8401         if (tx_idx != tp->tx_prod)
8402                 goto out;
8403
8404         if (rx_idx != rx_start_idx + num_pkts)
8405                 goto out;
8406
8407         desc = &tp->rx_rcb[rx_start_idx];
8408         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8409         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8410         if (opaque_key != RXD_OPAQUE_RING_STD)
8411                 goto out;
8412
8413         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8414             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8415                 goto out;
8416
8417         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8418         if (rx_len != tx_len)
8419                 goto out;
8420
8421         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8422
8423         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8424         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8425
8426         for (i = 14; i < tx_len; i++) {
8427                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8428                         goto out;
8429         }
8430         err = 0;
8431         
8432         /* tg3_free_rings will unmap and free the rx_skb */
8433 out:
8434         return err;
8435 }
8436
8437 #define TG3_MAC_LOOPBACK_FAILED         1
8438 #define TG3_PHY_LOOPBACK_FAILED         2
8439 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8440                                          TG3_PHY_LOOPBACK_FAILED)
8441
8442 static int tg3_test_loopback(struct tg3 *tp)
8443 {
8444         int err = 0;
8445
8446         if (!netif_running(tp->dev))
8447                 return TG3_LOOPBACK_FAILED;
8448
8449         tg3_reset_hw(tp);
8450
8451         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8452                 err |= TG3_MAC_LOOPBACK_FAILED;
8453         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8454                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8455                         err |= TG3_PHY_LOOPBACK_FAILED;
8456         }
8457
8458         return err;
8459 }
8460
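/* ethtool self-test.  The NVRAM and link tests (data[0], data[1]) run with
 * the chip live; the register, memory, loopback and interrupt tests
 * (data[2]..data[5]) are offline-only and require halting and then
 * reinitializing the hardware.
 */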
8461 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8462                           u64 *data)
8463 {
8464         struct tg3 *tp = netdev_priv(dev);
8465
8466         if (tp->link_config.phy_is_low_power)
8467                 tg3_set_power_state(tp, PCI_D0);
8468
8469         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8470
8471         if (tg3_test_nvram(tp) != 0) {
8472                 etest->flags |= ETH_TEST_FL_FAILED;
8473                 data[0] = 1;
8474         }
8475         if (tg3_test_link(tp) != 0) {
8476                 etest->flags |= ETH_TEST_FL_FAILED;
8477                 data[1] = 1;
8478         }
8479         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8480                 int err, irq_sync = 0;
8481
8482                 if (netif_running(dev)) {
8483                         tg3_netif_stop(tp);
8484                         irq_sync = 1;
8485                 }
8486
8487                 tg3_full_lock(tp, irq_sync);
8488
8489                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8490                 err = tg3_nvram_lock(tp);
8491                 tg3_halt_cpu(tp, RX_CPU_BASE);
8492                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8493                         tg3_halt_cpu(tp, TX_CPU_BASE);
8494                 if (!err)
8495                         tg3_nvram_unlock(tp);
8496
8497                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8498                         tg3_phy_reset(tp);
8499
8500                 if (tg3_test_registers(tp) != 0) {
8501                         etest->flags |= ETH_TEST_FL_FAILED;
8502                         data[2] = 1;
8503                 }
8504                 if (tg3_test_memory(tp) != 0) {
8505                         etest->flags |= ETH_TEST_FL_FAILED;
8506                         data[3] = 1;
8507                 }
8508                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8509                         etest->flags |= ETH_TEST_FL_FAILED;
8510
8511                 tg3_full_unlock(tp);
8512
8513                 if (tg3_test_interrupt(tp) != 0) {
8514                         etest->flags |= ETH_TEST_FL_FAILED;
8515                         data[5] = 1;
8516                 }
8517
8518                 tg3_full_lock(tp, 0);
8519
8520                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8521                 if (netif_running(dev)) {
8522                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8523                         tg3_init_hw(tp);
8524                         tg3_netif_start(tp);
8525                 }
8526
8527                 tg3_full_unlock(tp);
8528         }
8529         if (tp->link_config.phy_is_low_power)
8530                 tg3_set_power_state(tp, PCI_D3hot);
8531
8532 }
8533
8534 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8535 {
8536         struct mii_ioctl_data *data = if_mii(ifr);
8537         struct tg3 *tp = netdev_priv(dev);
8538         int err;
8539
8540         switch(cmd) {
8541         case SIOCGMIIPHY:
8542                 data->phy_id = PHY_ADDR;
8543
8544                 /* fallthru */
8545         case SIOCGMIIREG: {
8546                 u32 mii_regval;
8547
8548                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8549                         break;                  /* We have no PHY */
8550
8551                 if (tp->link_config.phy_is_low_power)
8552                         return -EAGAIN;
8553
8554                 spin_lock_bh(&tp->lock);
8555                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8556                 spin_unlock_bh(&tp->lock);
8557
8558                 data->val_out = mii_regval;
8559
8560                 return err;
8561         }
8562
8563         case SIOCSMIIREG:
8564                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8565                         break;                  /* We have no PHY */
8566
8567                 if (!capable(CAP_NET_ADMIN))
8568                         return -EPERM;
8569
8570                 if (tp->link_config.phy_is_low_power)
8571                         return -EAGAIN;
8572
8573                 spin_lock_bh(&tp->lock);
8574                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8575                 spin_unlock_bh(&tp->lock);
8576
8577                 return err;
8578
8579         default:
8580                 /* do nothing */
8581                 break;
8582         }
8583         return -EOPNOTSUPP;
8584 }
8585
8586 #if TG3_VLAN_TAG_USED
8587 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8588 {
8589         struct tg3 *tp = netdev_priv(dev);
8590
8591         tg3_full_lock(tp, 0);
8592
8593         tp->vlgrp = grp;
8594
8595         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8596         __tg3_set_rx_mode(dev);
8597
8598         tg3_full_unlock(tp);
8599 }
8600
8601 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8602 {
8603         struct tg3 *tp = netdev_priv(dev);
8604
8605         tg3_full_lock(tp, 0);
8606         if (tp->vlgrp)
8607                 tp->vlgrp->vlan_devices[vid] = NULL;
8608         tg3_full_unlock(tp);
8609 }
8610 #endif
8611
8612 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8613 {
8614         struct tg3 *tp = netdev_priv(dev);
8615
8616         memcpy(ec, &tp->coal, sizeof(*ec));
8617         return 0;
8618 }
8619
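/* Validate the requested coalescing parameters against the chip's limits
 * (on 5705 and newer parts the per-irq tick and statistics coalescing limits
 * are left at zero, so only zero is accepted for those fields) and copy only
 * the supported parameters into the live configuration.
 */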
8620 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8621 {
8622         struct tg3 *tp = netdev_priv(dev);
8623         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8624         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8625
8626         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8627                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8628                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8629                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8630                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8631         }
8632
8633         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8634             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8635             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8636             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8637             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8638             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8639             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8640             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8641             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8642             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8643                 return -EINVAL;
8644
8645         /* No rx interrupts will be generated if both are zero */
8646         if ((ec->rx_coalesce_usecs == 0) &&
8647             (ec->rx_max_coalesced_frames == 0))
8648                 return -EINVAL;
8649
8650         /* No tx interrupts will be generated if both are zero */
8651         if ((ec->tx_coalesce_usecs == 0) &&
8652             (ec->tx_max_coalesced_frames == 0))
8653                 return -EINVAL;
8654
8655         /* Only copy relevant parameters, ignore all others. */
8656         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8657         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8658         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8659         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8660         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8661         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8662         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8663         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8664         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8665
8666         if (netif_running(dev)) {
8667                 tg3_full_lock(tp, 0);
8668                 __tg3_set_coalesce(tp, &tp->coal);
8669                 tg3_full_unlock(tp);
8670         }
8671         return 0;
8672 }
8673
8674 static struct ethtool_ops tg3_ethtool_ops = {
8675         .get_settings           = tg3_get_settings,
8676         .set_settings           = tg3_set_settings,
8677         .get_drvinfo            = tg3_get_drvinfo,
8678         .get_regs_len           = tg3_get_regs_len,
8679         .get_regs               = tg3_get_regs,
8680         .get_wol                = tg3_get_wol,
8681         .set_wol                = tg3_set_wol,
8682         .get_msglevel           = tg3_get_msglevel,
8683         .set_msglevel           = tg3_set_msglevel,
8684         .nway_reset             = tg3_nway_reset,
8685         .get_link               = ethtool_op_get_link,
8686         .get_eeprom_len         = tg3_get_eeprom_len,
8687         .get_eeprom             = tg3_get_eeprom,
8688         .set_eeprom             = tg3_set_eeprom,
8689         .get_ringparam          = tg3_get_ringparam,
8690         .set_ringparam          = tg3_set_ringparam,
8691         .get_pauseparam         = tg3_get_pauseparam,
8692         .set_pauseparam         = tg3_set_pauseparam,
8693         .get_rx_csum            = tg3_get_rx_csum,
8694         .set_rx_csum            = tg3_set_rx_csum,
8695         .get_tx_csum            = ethtool_op_get_tx_csum,
8696         .set_tx_csum            = tg3_set_tx_csum,
8697         .get_sg                 = ethtool_op_get_sg,
8698         .set_sg                 = ethtool_op_set_sg,
8699 #if TG3_TSO_SUPPORT != 0
8700         .get_tso                = ethtool_op_get_tso,
8701         .set_tso                = tg3_set_tso,
8702 #endif
8703         .self_test_count        = tg3_get_test_count,
8704         .self_test              = tg3_self_test,
8705         .get_strings            = tg3_get_strings,
8706         .phys_id                = tg3_phys_id,
8707         .get_stats_count        = tg3_get_stats_count,
8708         .get_ethtool_stats      = tg3_get_ethtool_stats,
8709         .get_coalesce           = tg3_get_coalesce,
8710         .set_coalesce           = tg3_set_coalesce,
8711         .get_perm_addr          = ethtool_op_get_perm_addr,
8712 };
8713
8714 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8715 {
8716         u32 cursize, val, magic;
8717
8718         tp->nvram_size = EEPROM_CHIP_SIZE;
8719
8720         if (tg3_nvram_read(tp, 0, &val) != 0)
8721                 return;
8722
8723         magic = swab32(val);
8724         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
8725                 return;
8726
8727         /*
8728          * Size the chip by reading offsets at increasing powers of two.
8729          * When we encounter our validation signature, we know the addressing
8730          * has wrapped around, and thus have our chip size.
8731          */
8732         cursize = 0x10;
8733
8734         while (cursize < tp->nvram_size) {
8735                 if (tg3_nvram_read(tp, cursize, &val) != 0)
8736                         return;
8737
8738                 if (swab32(val) == magic)
8739                         break;
8740
8741                 cursize <<= 1;
8742         }
8743
8744         tp->nvram_size = cursize;
8745 }
8746                 
8747 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8748 {
8749         u32 val;
8750
8751         if (tg3_nvram_read(tp, 0, &val) != 0)
8752                 return;
8753
8754         /* Selfboot format */
8755         if (swab32(val) != TG3_EEPROM_MAGIC) {
8756                 tg3_get_eeprom_size(tp);
8757                 return;
8758         }
8759
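        /* Non-selfboot images store the NVRAM size, in kilobytes, in the
         * upper 16 bits of the word at offset 0xf0; fall back to 128KB if
         * that field is zero or unreadable.
         */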
8760         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8761                 if (val != 0) {
8762                         tp->nvram_size = (val >> 16) * 1024;
8763                         return;
8764                 }
8765         }
8766         tp->nvram_size = 0x20000;
8767 }
8768
8769 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8770 {
8771         u32 nvcfg1;
8772
8773         nvcfg1 = tr32(NVRAM_CFG1);
8774         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8775                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8776         }
8777         else {
8778                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8779                 tw32(NVRAM_CFG1, nvcfg1);
8780         }
8781
8782         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8783             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8784                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8785                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8786                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8787                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8788                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8789                                 break;
8790                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8791                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8792                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8793                                 break;
8794                         case FLASH_VENDOR_ATMEL_EEPROM:
8795                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8796                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8797                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8798                                 break;
8799                         case FLASH_VENDOR_ST:
8800                                 tp->nvram_jedecnum = JEDEC_ST;
8801                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8802                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8803                                 break;
8804                         case FLASH_VENDOR_SAIFUN:
8805                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8806                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8807                                 break;
8808                         case FLASH_VENDOR_SST_SMALL:
8809                         case FLASH_VENDOR_SST_LARGE:
8810                                 tp->nvram_jedecnum = JEDEC_SST;
8811                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8812                                 break;
8813                 }
8814         }
8815         else {
8816                 tp->nvram_jedecnum = JEDEC_ATMEL;
8817                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8818                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8819         }
8820 }
8821
8822 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8823 {
8824         u32 nvcfg1;
8825
8826         nvcfg1 = tr32(NVRAM_CFG1);
8827
8828         /* NVRAM protection for TPM */
8829         if (nvcfg1 & (1 << 27))
8830                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8831
8832         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8833                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8834                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8835                         tp->nvram_jedecnum = JEDEC_ATMEL;
8836                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8837                         break;
8838                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8839                         tp->nvram_jedecnum = JEDEC_ATMEL;
8840                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8841                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8842                         break;
8843                 case FLASH_5752VENDOR_ST_M45PE10:
8844                 case FLASH_5752VENDOR_ST_M45PE20:
8845                 case FLASH_5752VENDOR_ST_M45PE40:
8846                         tp->nvram_jedecnum = JEDEC_ST;
8847                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8848                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8849                         break;
8850         }
8851
8852         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8853                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8854                         case FLASH_5752PAGE_SIZE_256:
8855                                 tp->nvram_pagesize = 256;
8856                                 break;
8857                         case FLASH_5752PAGE_SIZE_512:
8858                                 tp->nvram_pagesize = 512;
8859                                 break;
8860                         case FLASH_5752PAGE_SIZE_1K:
8861                                 tp->nvram_pagesize = 1024;
8862                                 break;
8863                         case FLASH_5752PAGE_SIZE_2K:
8864                                 tp->nvram_pagesize = 2048;
8865                                 break;
8866                         case FLASH_5752PAGE_SIZE_4K:
8867                                 tp->nvram_pagesize = 4096;
8868                                 break;
8869                         case FLASH_5752PAGE_SIZE_264:
8870                                 tp->nvram_pagesize = 264;
8871                                 break;
8872                 }
8873         }
8874         else {
8875                 /* For eeprom, set pagesize to maximum eeprom size */
8876                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8877
8878                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8879                 tw32(NVRAM_CFG1, nvcfg1);
8880         }
8881 }
8882
8883 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
8884 {
8885         u32 nvcfg1;
8886
8887         nvcfg1 = tr32(NVRAM_CFG1);
8888
8889         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8890                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
8891                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
8892                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
8893                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
8894                         tp->nvram_jedecnum = JEDEC_ATMEL;
8895                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8896                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8897
8898                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8899                         tw32(NVRAM_CFG1, nvcfg1);
8900                         break;
8901                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8902                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
8903                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
8904                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
8905                         tp->nvram_jedecnum = JEDEC_ATMEL;
8906                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8907                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8908                         tp->nvram_pagesize = 264;
8909                         break;
8910                 case FLASH_5752VENDOR_ST_M45PE10:
8911                 case FLASH_5752VENDOR_ST_M45PE20:
8912                 case FLASH_5752VENDOR_ST_M45PE40:
8913                         tp->nvram_jedecnum = JEDEC_ST;
8914                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8915                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8916                         tp->nvram_pagesize = 256;
8917                         break;
8918         }
8919 }
8920
8921 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
8922 static void __devinit tg3_nvram_init(struct tg3 *tp)
8923 {
8924         int j;
8925
8926         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8927                 return;
8928
8929         tw32_f(GRC_EEPROM_ADDR,
8930              (EEPROM_ADDR_FSM_RESET |
8931               (EEPROM_DEFAULT_CLOCK_PERIOD <<
8932                EEPROM_ADDR_CLKPERD_SHIFT)));
8933
8934         /* XXX schedule_timeout() ... */
8935         for (j = 0; j < 100; j++)
8936                 udelay(10);
8937
8938         /* Enable seeprom accesses. */
8939         tw32_f(GRC_LOCAL_CTRL,
8940              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8941         udelay(100);
8942
8943         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8944             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8945                 tp->tg3_flags |= TG3_FLAG_NVRAM;
8946
8947                 if (tg3_nvram_lock(tp)) {
8948                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
8949                                "tg3_nvram_init failed.\n", tp->dev->name);
8950                         return;
8951                 }
8952                 tg3_enable_nvram_access(tp);
8953
8954                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8955                         tg3_get_5752_nvram_info(tp);
8956                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8957                         tg3_get_5787_nvram_info(tp);
8958                 else
8959                         tg3_get_nvram_info(tp);
8960
8961                 tg3_get_nvram_size(tp);
8962
8963                 tg3_disable_nvram_access(tp);
8964                 tg3_nvram_unlock(tp);
8965
8966         } else {
8967                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8968
8969                 tg3_get_eeprom_size(tp);
8970         }
8971 }
8972
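/* Read one 32-bit word from the legacy serial EEPROM: program the address
 * into GRC_EEPROM_ADDR with the READ and START bits set, poll for the
 * COMPLETE bit, then fetch the data from GRC_EEPROM_DATA.
 */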
8973 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8974                                         u32 offset, u32 *val)
8975 {
8976         u32 tmp;
8977         int i;
8978
8979         if (offset > EEPROM_ADDR_ADDR_MASK ||
8980             (offset % 4) != 0)
8981                 return -EINVAL;
8982
8983         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8984                                         EEPROM_ADDR_DEVID_MASK |
8985                                         EEPROM_ADDR_READ);
8986         tw32(GRC_EEPROM_ADDR,
8987              tmp |
8988              (0 << EEPROM_ADDR_DEVID_SHIFT) |
8989              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8990               EEPROM_ADDR_ADDR_MASK) |
8991              EEPROM_ADDR_READ | EEPROM_ADDR_START);
8992
8993         for (i = 0; i < 10000; i++) {
8994                 tmp = tr32(GRC_EEPROM_ADDR);
8995
8996                 if (tmp & EEPROM_ADDR_COMPLETE)
8997                         break;
8998                 udelay(100);
8999         }
9000         if (!(tmp & EEPROM_ADDR_COMPLETE))
9001                 return -EBUSY;
9002
9003         *val = tr32(GRC_EEPROM_DATA);
9004         return 0;
9005 }
9006
9007 #define NVRAM_CMD_TIMEOUT 10000
9008
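/* Issue an NVRAM command and poll for the DONE bit, giving up after
 * NVRAM_CMD_TIMEOUT polls of 10us each.
 */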
9009 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9010 {
9011         int i;
9012
9013         tw32(NVRAM_CMD, nvram_cmd);
9014         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9015                 udelay(10);
9016                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9017                         udelay(10);
9018                         break;
9019                 }
9020         }
9021         if (i == NVRAM_CMD_TIMEOUT) {
9022                 return -EBUSY;
9023         }
9024         return 0;
9025 }
9026
9027 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9028 {
9029         int ret;
9030
9031         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9032                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
9033                 return -EINVAL;
9034         }
9035
9036         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9037                 return tg3_nvram_read_using_eeprom(tp, offset, val);
9038
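        /* Buffered Atmel (AT45DB) flash is addressed by page number and
         * byte-within-page rather than by a flat byte offset, so convert
         * the linear offset before programming NVRAM_ADDR.
         */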
9039         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9040                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9041                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
9042
9043                 offset = ((offset / tp->nvram_pagesize) <<
9044                           ATMEL_AT45DB0X1B_PAGE_POS) +
9045                         (offset % tp->nvram_pagesize);
9046         }
9047
9048         if (offset > NVRAM_ADDR_MSK)
9049                 return -EINVAL;
9050
9051         ret = tg3_nvram_lock(tp);
9052         if (ret)
9053                 return ret;
9054
9055         tg3_enable_nvram_access(tp);
9056
9057         tw32(NVRAM_ADDR, offset);
9058         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9059                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9060
9061         if (ret == 0)
9062                 *val = swab32(tr32(NVRAM_RDDATA));
9063
9064         tg3_disable_nvram_access(tp);
9065
9066         tg3_nvram_unlock(tp);
9067
9068         return ret;
9069 }
9070
9071 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9072                                     u32 offset, u32 len, u8 *buf)
9073 {
9074         int i, j, rc = 0;
9075         u32 val;
9076
9077         for (i = 0; i < len; i += 4) {
9078                 u32 addr, data;
9079
9080                 addr = offset + i;
9081
9082                 memcpy(&data, buf + i, 4);
9083
9084                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9085
9086                 val = tr32(GRC_EEPROM_ADDR);
9087                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9088
9089                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9090                         EEPROM_ADDR_READ);
9091                 tw32(GRC_EEPROM_ADDR, val |
9092                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
9093                         (addr & EEPROM_ADDR_ADDR_MASK) |
9094                         EEPROM_ADDR_START |
9095                         EEPROM_ADDR_WRITE);
9096                 
9097                 for (j = 0; j < 10000; j++) {
9098                         val = tr32(GRC_EEPROM_ADDR);
9099
9100                         if (val & EEPROM_ADDR_COMPLETE)
9101                                 break;
9102                         udelay(100);
9103                 }
9104                 if (!(val & EEPROM_ADDR_COMPLETE)) {
9105                         rc = -EBUSY;
9106                         break;
9107                 }
9108         }
9109
9110         return rc;
9111 }
9112
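/* Unbuffered flash parts can only be erased a page at a time: read the
 * affected page into a temporary buffer, merge in the new data, issue a
 * write-enable, erase the page, then program it back one word at a time.
 */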
9113 /* offset and length are dword aligned */
9114 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9115                 u8 *buf)
9116 {
9117         int ret = 0;
9118         u32 pagesize = tp->nvram_pagesize;
9119         u32 pagemask = pagesize - 1;
9120         u32 nvram_cmd;
9121         u8 *tmp;
9122
9123         tmp = kmalloc(pagesize, GFP_KERNEL);
9124         if (tmp == NULL)
9125                 return -ENOMEM;
9126
9127         while (len) {
9128                 int j;
9129                 u32 phy_addr, page_off, size;
9130
9131                 phy_addr = offset & ~pagemask;
9132         
9133                 for (j = 0; j < pagesize; j += 4) {
9134                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9135                                                 (u32 *) (tmp + j))))
9136                                 break;
9137                 }
9138                 if (ret)
9139                         break;
9140
9141                 page_off = offset & pagemask;
9142                 size = pagesize;
9143                 if (len < size)
9144                         size = len;
9145
9146                 len -= size;
9147
9148                 memcpy(tmp + page_off, buf, size);
9149
9150                 offset = offset + (pagesize - page_off);
9151
9152                 tg3_enable_nvram_access(tp);
9153
9154                 /*
9155                  * Before we can erase the flash page, we need
9156                  * to issue a special "write enable" command.
9157                  */
9158                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9159
9160                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9161                         break;
9162
9163                 /* Erase the target page */
9164                 tw32(NVRAM_ADDR, phy_addr);
9165
9166                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9167                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9168
9169                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9170                         break;
9171
9172                 /* Issue another write enable to start the write. */
9173                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9174
9175                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9176                         break;
9177
9178                 for (j = 0; j < pagesize; j += 4) {
9179                         u32 data;
9180
9181                         data = *((u32 *) (tmp + j));
9182                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9183
9184                         tw32(NVRAM_ADDR, phy_addr + j);
9185
9186                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9187                                 NVRAM_CMD_WR;
9188
9189                         if (j == 0)
9190                                 nvram_cmd |= NVRAM_CMD_FIRST;
9191                         else if (j == (pagesize - 4))
9192                                 nvram_cmd |= NVRAM_CMD_LAST;
9193
9194                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9195                                 break;
9196                 }
9197                 if (ret)
9198                         break;
9199         }
9200
9201         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9202         tg3_nvram_exec_cmd(tp, nvram_cmd);
9203
9204         kfree(tmp);
9205
9206         return ret;
9207 }
9208
9209 /* offset and length are dword aligned */
9210 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9211                 u8 *buf)
9212 {
9213         int i, ret = 0;
9214
9215         for (i = 0; i < len; i += 4, offset += 4) {
9216                 u32 data, page_off, phy_addr, nvram_cmd;
9217
9218                 memcpy(&data, buf + i, 4);
9219                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9220
9221                 page_off = offset % tp->nvram_pagesize;
9222
9223                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9224                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
9225
9226                         phy_addr = ((offset / tp->nvram_pagesize) <<
9227                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
9228                 } else {
9230                         phy_addr = offset;
9231                 }
9232
9233                 tw32(NVRAM_ADDR, phy_addr);
9234
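                     /* Tag the first word of a page (or of the whole
                      * transfer) with NVRAM_CMD_FIRST and the last word of
                      * a page (or of the whole transfer) with NVRAM_CMD_LAST
                      * so the controller sees where each burst begins and ends.
                      */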
9235                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9236
9237                 if ((page_off == 0) || (i == 0))
9238                         nvram_cmd |= NVRAM_CMD_FIRST;
9239                 else if (page_off == (tp->nvram_pagesize - 4))
9240                         nvram_cmd |= NVRAM_CMD_LAST;
9241
9242                 if (i == (len - 4))
9243                         nvram_cmd |= NVRAM_CMD_LAST;
9244
9245                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9246                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
9247                     (tp->nvram_jedecnum == JEDEC_ST) &&
9248                     (nvram_cmd & NVRAM_CMD_FIRST)) {
9249
9250                         if ((ret = tg3_nvram_exec_cmd(tp,
9251                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9252                                 NVRAM_CMD_DONE)))
9253
9254                                 break;
9255                 }
9256                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9257                         /* We always do complete word writes to eeprom. */
9258                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9259                 }
9260
9261                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9262                         break;
9263         }
9264         return ret;
9265 }
9266
9267 /* offset and length are dword aligned */
9268 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9269 {
9270         int ret;
9271
9272         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9273                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
9274                 return -EINVAL;
9275         }
9276
9277         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9278                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9279                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9280                 udelay(40);
9281         }
9282
9283         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9284                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9285         } else {
9287                 u32 grc_mode;
9288
9289                 ret = tg3_nvram_lock(tp);
9290                 if (ret)
9291                         return ret;
9292
9293                 tg3_enable_nvram_access(tp);
9294                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9295                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9296                         tw32(NVRAM_WRITE1, 0x406);
9297
9298                 grc_mode = tr32(GRC_MODE);
9299                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9300
9301                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9302                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9303
9304                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9305                                 buf);
9306                 } else {
9308                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9309                                 buf);
9310                 }
9311
9312                 grc_mode = tr32(GRC_MODE);
9313                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9314
9315                 tg3_disable_nvram_access(tp);
9316                 tg3_nvram_unlock(tp);
9317         }
9318
9319         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9320                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9321                 udelay(40);
9322         }
9323
9324         return ret;
9325 }
9326
9327 struct subsys_tbl_ent {
9328         u16 subsys_vendor, subsys_devid;
9329         u32 phy_id;
9330 };
9331
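     /* Fallback PHY ID table indexed by PCI subsystem IDs, used when the
      * PHY cannot be identified from hardware or eeprom.  An entry with
      * phy_id 0 is treated as a SERDES (fiber) part by tg3_phy_probe().
      */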
9332 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9333         /* Broadcom boards. */
9334         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9335         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9336         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9337         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
9338         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9339         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9340         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
9341         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9342         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9343         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9344         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9345
9346         /* 3com boards. */
9347         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9348         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9349         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
9350         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9351         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9352
9353         /* DELL boards. */
9354         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9355         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9356         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9357         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9358
9359         /* Compaq boards. */
9360         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9361         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9362         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
9363         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9364         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9365
9366         /* IBM boards. */
9367         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9368 };
9369
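     /* Match this board's PCI subsystem vendor/device IDs against the
      * table above.
      */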
9370 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9371 {
9372         int i;
9373
9374         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9375                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9376                      tp->pdev->subsystem_vendor) &&
9377                     (subsys_id_to_phy_id[i].subsys_devid ==
9378                      tp->pdev->subsystem_device))
9379                         return &subsys_id_to_phy_id[i];
9380         }
9381         return NULL;
9382 }
9383
9384 /* Since this function may be called in D3-hot power state during
9385  * tg3_init_one(), only config cycles are allowed.
9386  */
9387 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9388 {
9389         u32 val;
9390
9391         /* Make sure register accesses (indirect or otherwise)
9392          * will function correctly.
9393          */
9394         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9395                                tp->misc_host_ctrl);
9396
9397         tp->phy_id = PHY_ID_INVALID;
9398         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9399
9400         /* Do not even try poking around in here on Sun parts.  */
9401         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
9402                 return;
9403
9404         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9405         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9406                 u32 nic_cfg, led_cfg;
9407                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9408                 int eeprom_phy_serdes = 0;
9409
9410                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9411                 tp->nic_sram_data_cfg = nic_cfg;
9412
9413                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9414                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9415                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9416                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9417                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9418                     (ver > 0) && (ver < 0x100))
9419                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9420
9421                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9422                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9423                         eeprom_phy_serdes = 1;
9424
9425                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9426                 if (nic_phy_id != 0) {
9427                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9428                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9429
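                             /* Repack the two halves into the same internal
                              * PHY_ID layout that tg3_phy_probe() builds from
                              * the MII PHYSID registers.
                              */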
9430                         eeprom_phy_id  = (id1 >> 16) << 10;
9431                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9432                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9433                 } else
9434                         eeprom_phy_id = 0;
9435
9436                 tp->phy_id = eeprom_phy_id;
9437                 if (eeprom_phy_serdes) {
9438                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9439                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9440                         else
9441                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9442                 }
9443
9444                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9445                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9446                                     SHASTA_EXT_LED_MODE_MASK);
9447                 else
9448                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9449
9450                 switch (led_cfg) {
9451                 default:
9452                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9453                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9454                         break;
9455
9456                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9457                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9458                         break;
9459
9460                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9461                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9462
9463                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9464                          * read on some older 5700/5701 bootcode.
9465                          */
9466                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9467                             ASIC_REV_5700 ||
9468                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9469                             ASIC_REV_5701)
9470                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9471
9472                         break;
9473
9474                 case SHASTA_EXT_LED_SHARED:
9475                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9476                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9477                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9478                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9479                                                  LED_CTRL_MODE_PHY_2);
9480                         break;
9481
9482                 case SHASTA_EXT_LED_MAC:
9483                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9484                         break;
9485
9486                 case SHASTA_EXT_LED_COMBO:
9487                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9488                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9489                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9490                                                  LED_CTRL_MODE_PHY_2);
9491                         break;
9492
9493                 }
9494
9495                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9496                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9497                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9498                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9499
9500                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9501                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9502                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9503                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9504
9505                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9506                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9507                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9508                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9509                 }
9510                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9511                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9512
9513                 if (cfg2 & (1 << 17))
9514                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9515
9516                 /* serdes signal pre-emphasis in register 0x590 set by */
9517                 /* bootcode if bit 18 is set */
9518                 if (cfg2 & (1 << 18))
9519                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9520         }
9521 }
9522
9523 static int __devinit tg3_phy_probe(struct tg3 *tp)
9524 {
9525         u32 hw_phy_id_1, hw_phy_id_2;
9526         u32 hw_phy_id, hw_phy_id_masked;
9527         int err;
9528
9529         /* Reading the PHY ID register can conflict with ASF
9530          * firmware access to the PHY hardware.
9531          */
9532         err = 0;
9533         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9534                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9535         } else {
9536                 /* Now read the physical PHY_ID from the chip and verify
9537                  * that it is sane.  If it doesn't look good, we fall back
9538                  * to the PHY ID recorded in the eeprom area, and failing
9539                  * that to the hard-coded subsystem ID table.
9540                  */
9541                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9542                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9543
9544                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9545                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9546                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9547
9548                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9549         }
9550
9551         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9552                 tp->phy_id = hw_phy_id;
9553                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9554                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9555                 else
9556                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9557         } else {
9558                 if (tp->phy_id != PHY_ID_INVALID) {
9559                         /* Do nothing, phy ID already set up in
9560                          * tg3_get_eeprom_hw_cfg().
9561                          */
9562                 } else {
9563                         struct subsys_tbl_ent *p;
9564
9565                         /* No eeprom signature?  Try the hardcoded
9566                          * subsys device table.
9567                          */
9568                         p = lookup_by_subsys(tp);
9569                         if (!p)
9570                                 return -ENODEV;
9571
9572                         tp->phy_id = p->phy_id;
9573                         if (!tp->phy_id ||
9574                             tp->phy_id == PHY_ID_BCM8002)
9575                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9576                 }
9577         }
9578
9579         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9580             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9581                 u32 bmsr, adv_reg, tg3_ctrl;
9582
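                     /* BMSR link status is latched; read it twice so the
                      * second read reflects the current link state.
                      */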
9583                 tg3_readphy(tp, MII_BMSR, &bmsr);
9584                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9585                     (bmsr & BMSR_LSTATUS))
9586                         goto skip_phy_reset;
9587                     
9588                 err = tg3_phy_reset(tp);
9589                 if (err)
9590                         return err;
9591
9592                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9593                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9594                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9595                 tg3_ctrl = 0;
9596                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9597                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9598                                     MII_TG3_CTRL_ADV_1000_FULL);
9599                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9600                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9601                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9602                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9603                 }
9604
9605                 if (!tg3_copper_is_advertising_all(tp)) {
9606                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9607
9608                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9609                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9610
9611                         tg3_writephy(tp, MII_BMCR,
9612                                      BMCR_ANENABLE | BMCR_ANRESTART);
9613                 }
9614                 tg3_phy_set_wirespeed(tp);
9615
9616                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9617                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9618                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9619         }
9620
9621 skip_phy_reset:
9622         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9623                 err = tg3_init_5401phy_dsp(tp);
9624                 if (err)
9625                         return err;
9626         }
9627
9628         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9629                 err = tg3_init_5401phy_dsp(tp);
9630         }
9631
9632         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9633                 tp->link_config.advertising =
9634                         (ADVERTISED_1000baseT_Half |
9635                          ADVERTISED_1000baseT_Full |
9636                          ADVERTISED_Autoneg |
9637                          ADVERTISED_FIBRE);
9638         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9639                 tp->link_config.advertising &=
9640                         ~(ADVERTISED_1000baseT_Half |
9641                           ADVERTISED_1000baseT_Full);
9642
9643         return err;
9644 }
9645
9646 static void __devinit tg3_read_partno(struct tg3 *tp)
9647 {
9648         unsigned char vpd_data[256];
9649         int i;
9650         u32 magic;
9651
9652         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9653                 /* Sun decided not to put the necessary bits in the
9654                  * NVRAM of their onboard tg3 parts :(
9655                  */
9656                 strcpy(tp->board_part_number, "Sun 570X");
9657                 return;
9658         }
9659
9660         if (tg3_nvram_read(tp, 0x0, &magic))
9661                 return;
9662
9663         if (swab32(magic) == TG3_EEPROM_MAGIC) {
9664                 for (i = 0; i < 256; i += 4) {
9665                         u32 tmp;
9666
9667                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9668                                 goto out_not_found;
9669
9670                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9671                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9672                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9673                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9674                 }
9675         } else {
9676                 int vpd_cap;
9677
9678                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
9679                 for (i = 0; i < 256; i += 4) {
9680                         u32 tmp, j = 0;
9681                         u16 tmp16;
9682
9683                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
9684                                               i);
9685                         while (j++ < 100) {
9686                                 pci_read_config_word(tp->pdev, vpd_cap +
9687                                                      PCI_VPD_ADDR, &tmp16);
9688                                 if (tmp16 & 0x8000)
9689                                         break;
9690                                 msleep(1);
9691                         }
9692                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9693                                               &tmp);
9694                         tmp = cpu_to_le32(tmp);
9695                         memcpy(&vpd_data[i], &tmp, 4);
9696                 }
9697         }
9698
9699         /* Now parse and find the part number. */
9700         for (i = 0; i < 256; ) {
9701                 unsigned char val = vpd_data[i];
9702                 int block_end;
9703
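                     /* 0x82 is the VPD Identifier String resource and 0x91
                      * the read/write resource; skip both.  The part number
                      * lives in the read-only (0x90) resource under the
                      * "PN" keyword.
                      */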
9704                 if (val == 0x82 || val == 0x91) {
9705                         i = (i + 3 +
9706                              (vpd_data[i + 1] +
9707                               (vpd_data[i + 2] << 8)));
9708                         continue;
9709                 }
9710
9711                 if (val != 0x90)
9712                         goto out_not_found;
9713
9714                 block_end = (i + 3 +
9715                              (vpd_data[i + 1] +
9716                               (vpd_data[i + 2] << 8)));
9717                 i += 3;
9718                 while (i < block_end) {
9719                         if (vpd_data[i + 0] == 'P' &&
9720                             vpd_data[i + 1] == 'N') {
9721                                 int partno_len = vpd_data[i + 2];
9722
9723                                 if (partno_len > 24)
9724                                         goto out_not_found;
9725
9726                                 memcpy(tp->board_part_number,
9727                                        &vpd_data[i + 3],
9728                                        partno_len);
9729
9730                                 /* Success. */
9731                                 return;
9732                         }
                             /* Advance past this keyword: two name bytes, one
                              * length byte, then the data itself.  Without this
                              * the loop never terminates when "PN" is not the
                              * first keyword in the block.
                              */
                             i += 3 + vpd_data[i + 2];
9733                 }
9734
9735                 /* Part number not found. */
9736                 goto out_not_found;
9737         }
9738
9739 out_not_found:
9740         strcpy(tp->board_part_number, "none");
9741 }
9742
9743 #ifdef CONFIG_SPARC64
9744 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9745 {
9746         struct pci_dev *pdev = tp->pdev;
9747         struct pcidev_cookie *pcp = pdev->sysdata;
9748
9749         if (pcp != NULL) {
9750                 int node = pcp->prom_node;
9751                 u32 venid;
9752                 int err;
9753
9754                 err = prom_getproperty(node, "subsystem-vendor-id",
9755                                        (char *) &venid, sizeof(venid));
9756                 if (err == 0 || err == -1)
9757                         return 0;
9758                 if (venid == PCI_VENDOR_ID_SUN)
9759                         return 1;
9760
9761                 /* TG3 chips onboard the SunBlade-2500 don't have the
9762                  * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
9763                  * are distinguishable from non-Sun variants by being
9764                  * named "network" by the firmware.  Non-Sun cards will
9765                  * show up as being named "ethernet".
9766                  */
9767                 if (!strcmp(pcp->prom_name, "network"))
9768                         return 1;
9769         }
9770         return 0;
9771 }
9772 #endif
9773
9774 static int __devinit tg3_get_invariants(struct tg3 *tp)
9775 {
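             /* Host bridges known to reorder posted writes to the chip's
              * mailbox registers; see the TG3_FLAG_MBOX_WRITE_REORDER
              * handling below.
              */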
9776         static struct pci_device_id write_reorder_chipsets[] = {
9777                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9778                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9779                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9780                              PCI_DEVICE_ID_VIA_8385_0) },
9781                 { },
9782         };
9783         u32 misc_ctrl_reg;
9784         u32 cacheline_sz_reg;
9785         u32 pci_state_reg, grc_misc_cfg;
9786         u32 val;
9787         u16 pci_cmd;
9788         int err;
9789
9790 #ifdef CONFIG_SPARC64
9791         if (tg3_is_sun_570X(tp))
9792                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9793 #endif
9794
9795         /* Force memory write invalidate off.  If we leave it on,
9796          * then on 5700_BX chips we have to enable a workaround.
9797          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9798          * to match the cacheline size.  The Broadcom driver has this
9799          * workaround but turns MWI off all the time, so it never uses
9800          * it.  This seems to suggest that the workaround is insufficient.
9801          */
9802         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9803         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9804         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9805
9806         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9807          * has the register indirect write enable bit set before
9808          * we try to access any of the MMIO registers.  It is also
9809          * critical that the PCI-X hw workaround situation is decided
9810          * before that as well.
9811          */
9812         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9813                               &misc_ctrl_reg);
9814
9815         tp->pci_chip_rev_id = (misc_ctrl_reg >>
9816                                MISC_HOST_CTRL_CHIPREV_SHIFT);
9817
9818         /* Wrong chip ID in 5752 A0. This code can be removed later
9819          * as A0 is not in production.
9820          */
9821         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9822                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9823
9824         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9825          * we need to disable memory and use config. cycles
9826          * only to access all registers. The 5702/03 chips
9827          * can mistakenly decode the special cycles from the
9828          * ICH chipsets as memory write cycles, causing corruption
9829          * of register and memory space. Only certain ICH bridges
9830          * will drive special cycles with non-zero data during the
9831          * address phase which can fall within the 5703's address
9832          * range. This is not an ICH bug as the PCI spec allows
9833          * non-zero address during special cycles. However, only
9834          * these ICH bridges are known to drive non-zero addresses
9835          * during special cycles.
9836          *
9837          * Since special cycles do not cross PCI bridges, we only
9838          * enable this workaround if the 5703 is on the secondary
9839          * bus of these ICH bridges.
9840          */
9841         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
9842             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
9843                 static struct tg3_dev_id {
9844                         u32     vendor;
9845                         u32     device;
9846                         u32     rev;
9847                 } ich_chipsets[] = {
9848                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
9849                           PCI_ANY_ID },
9850                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
9851                           PCI_ANY_ID },
9852                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
9853                           0xa },
9854                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
9855                           PCI_ANY_ID },
9856                         { },
9857                 };
9858                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
9859                 struct pci_dev *bridge = NULL;
9860
9861                 while (pci_id->vendor != 0) {
9862                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
9863                                                 bridge);
9864                         if (!bridge) {
9865                                 pci_id++;
9866                                 continue;
9867                         }
9868                         if (pci_id->rev != PCI_ANY_ID) {
9869                                 u8 rev;
9870
9871                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
9872                                                      &rev);
9873                                 if (rev > pci_id->rev)
9874                                         continue;
9875                         }
9876                         if (bridge->subordinate &&
9877                             (bridge->subordinate->number ==
9878                              tp->pdev->bus->number)) {
9879
9880                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
9881                                 pci_dev_put(bridge);
9882                                 break;
9883                         }
9884                 }
9885         }
9886
9887         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
9888          * DMA addresses > 40-bit. This bridge may have additional
9889          * 57xx devices behind it, in some 4-port NIC designs for example.
9890          * Any tg3 device found behind the bridge will also need the 40-bit
9891          * DMA workaround.
9892          */
9893         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
9894             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9895                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
9896                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
9897                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
9898         } else {
9900                 struct pci_dev *bridge = NULL;
9901
9902                 do {
9903                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
9904                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
9905                                                 bridge);
9906                         if (bridge && bridge->subordinate &&
9907                             (bridge->subordinate->number <=
9908                              tp->pdev->bus->number) &&
9909                             (bridge->subordinate->subordinate >=
9910                              tp->pdev->bus->number)) {
9911                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
9912                                 pci_dev_put(bridge);
9913                                 break;
9914                         }
9915                 } while (bridge);
9916         }
9917
9918         /* Initialize misc host control in PCI block. */
9919         tp->misc_host_ctrl |= (misc_ctrl_reg &
9920                                MISC_HOST_CTRL_CHIPREV);
9921         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9922                                tp->misc_host_ctrl);
9923
9924         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9925                               &cacheline_sz_reg);
9926
9927         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
9928         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
9929         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
9930         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
9931
9932         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
9933             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
9934             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9935             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9936                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9937
9938         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
9939             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9940                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9941
9942         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
9943                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9944                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
9945                 else
9946                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1;
9947         }
9948
9949         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9950             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9951             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
9952             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
9953                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9954
9955         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9956                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9957
9958         /* If we have an AMD 762 or VIA K8T800 chipset, write
9959          * reordering to the mailbox registers done by the host
9960          * controller can cause major troubles.  We read back from
9961          * every mailbox register write to force the writes to be
9962          * posted to the chip in order.
9963          */
9964         if (pci_dev_present(write_reorder_chipsets) &&
9965             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9966                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9967
9968         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9969             tp->pci_lat_timer < 64) {
9970                 tp->pci_lat_timer = 64;
9971
9972                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
9973                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
9974                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
9975                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
9976
9977                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9978                                        cacheline_sz_reg);
9979         }
9980
9981         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9982                               &pci_state_reg);
9983
9984         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9985                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9986
9987                 /* If this is a 5700 BX chipset, and we are in PCI-X
9988                  * mode, enable register write workaround.
9989                  *
9990                  * The workaround is to use indirect register accesses
9991                  * for all chip writes not to mailbox registers.
9992                  */
9993                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9994                         u32 pm_reg;
9995                         u16 pci_cmd;
9996
9997                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9998
9999                         /* The chip can have its power management PCI config
10000                          * space registers clobbered due to this bug.
10001                          * So explicitly force the chip into D0 here.
10002                          */
10003                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10004                                               &pm_reg);
10005                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10006                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10007                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10008                                                pm_reg);
10009
10010                         /* Also, force SERR#/PERR# in PCI command. */
10011                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10012                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10013                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10014                 }
10015         }
10016
10017         /* 5700 BX chips need to have their TX producer index mailboxes
10018          * written twice to work around a bug.
10019          */
10020         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10021                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10022
10023         /* Back to back register writes can cause problems on this chip,
10024          * the workaround is to read back all reg writes except those to
10025          * mailbox regs.  See tg3_write_indirect_reg32().
10026          *
10027          * PCI Express 5750_A0 rev chips need this workaround too.
10028          */
10029         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10030             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10031              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10032                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10033
10034         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10035                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10036         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10037                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10038
10039         /* Chip-specific fixup from Broadcom driver */
10040         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10041             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10042                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10043                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10044         }
10045
10046         /* Default fast path register access methods */
10047         tp->read32 = tg3_read32;
10048         tp->write32 = tg3_write32;
10049         tp->read32_mbox = tg3_read32;
10050         tp->write32_mbox = tg3_write32;
10051         tp->write32_tx_mbox = tg3_write32;
10052         tp->write32_rx_mbox = tg3_write32;
10053
10054         /* Various workaround register access methods */
10055         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10056                 tp->write32 = tg3_write_indirect_reg32;
10057         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10058                 tp->write32 = tg3_write_flush_reg32;
10059
10060         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10061             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10062                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10063                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10064                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10065         }
10066
10067         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10068                 tp->read32 = tg3_read_indirect_reg32;
10069                 tp->write32 = tg3_write_indirect_reg32;
10070                 tp->read32_mbox = tg3_read_indirect_mbox;
10071                 tp->write32_mbox = tg3_write_indirect_mbox;
10072                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10073                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10074
10075                 iounmap(tp->regs);
10076                 tp->regs = NULL;
10077
10078                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10079                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10080                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10081         }
10082
10083         /* Get eeprom hw config before calling tg3_set_power_state().
10084          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10085          * determined before calling tg3_set_power_state() so that
10086          * we know whether or not to switch out of Vaux power.
10087          * When the flag is set, it means that GPIO1 is used for eeprom
10088          * write protect and also implies that it is a LOM where GPIOs
10089          * are not used to switch power.
10090          */ 
10091         tg3_get_eeprom_hw_cfg(tp);
10092
10093         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10094          * GPIO1 driven high will bring 5700's external PHY out of reset.
10095          * It is also used as eeprom write protect on LOMs.
10096          */
10097         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10098         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10099             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10100                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10101                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10102         /* Unused GPIO3 must be driven as output on 5752 because there
10103          * are no pull-up resistors on unused GPIO pins.
10104          */
10105         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10106                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10107
10108         /* Force the chip into D0. */
10109         err = tg3_set_power_state(tp, PCI_D0);
10110         if (err) {
10111                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10112                        pci_name(tp->pdev));
10113                 return err;
10114         }
10115
10116         /* 5700 B0 chips do not support checksumming correctly due
10117          * to hardware bugs.
10118          */
10119         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10120                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10121
10122         /* Pseudo-header checksum is done by hardware logic and not
10123          * the offload processors, so make the chip do the pseudo-
10124          * header checksums on receive.  For transmit it is more
10125          * convenient to do the pseudo-header checksum in software
10126          * as Linux does that on transmit for us in all cases.
10127          */
10128         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
10129         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
10130
10131         /* Derive initial jumbo mode from MTU assigned in
10132          * ether_setup() via the alloc_etherdev() call
10133          */
10134         if (tp->dev->mtu > ETH_DATA_LEN &&
10135             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10136                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10137
10138         /* Determine WakeOnLan speed to use. */
10139         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10140             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10141             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10142             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10143                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10144         } else {
10145                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10146         }
10147
10148         /* A few boards don't want the Ethernet@WireSpeed phy feature */
10149         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10150             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10151              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10152              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10153             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10154                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10155
10156         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10157             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10158                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10159         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10160                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10161
10162         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
10163             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787))
10164                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10165
10166         tp->coalesce_mode = 0;
10167         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10168             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10169                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10170
10171         /* Initialize MAC MI mode, polling disabled. */
10172         tw32_f(MAC_MI_MODE, tp->mi_mode);
10173         udelay(80);
10174
10175         /* Initialize data/descriptor byte/word swapping. */
10176         val = tr32(GRC_MODE);
10177         val &= GRC_MODE_HOST_STACKUP;
10178         tw32(GRC_MODE, val | tp->grc_mode);
10179
10180         tg3_switch_clocks(tp);
10181
10182         /* Clear this out for sanity. */
10183         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10184
10185         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10186                               &pci_state_reg);
10187         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10188             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10189                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10190
10191                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10192                     chiprevid == CHIPREV_ID_5701_B0 ||
10193                     chiprevid == CHIPREV_ID_5701_B2 ||
10194                     chiprevid == CHIPREV_ID_5701_B5) {
10195                         void __iomem *sram_base;
10196
10197                         /* Write some dummy words into the SRAM status block
10198                          * area and see if they read back correctly.  If the return
10199                          * value is bad, force enable the PCIX workaround.
10200                          */
10201                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10202
10203                         writel(0x00000000, sram_base);
10204                         writel(0x00000000, sram_base + 4);
10205                         writel(0xffffffff, sram_base + 4);
10206                         if (readl(sram_base) != 0x00000000)
10207                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10208                 }
10209         }
10210
10211         udelay(50);
10212         tg3_nvram_init(tp);
10213
10214         grc_misc_cfg = tr32(GRC_MISC_CFG);
10215         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10216
10217         /* Broadcom's driver says that CIOBE multisplit has a bug */
10218 #if 0
10219         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10220             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10221                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10222                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10223         }
10224 #endif
10225         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10226             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10227              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10228                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10229
10230         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10231             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10232                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10233         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10234                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10235                                       HOSTCC_MODE_CLRTICK_TXBD);
10236
10237                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10238                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10239                                        tp->misc_host_ctrl);
10240         }
10241
10242         /* these are limited to 10/100 only */
10243         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10244              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10245             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10246              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10247              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10248               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10249               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10250             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10251              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10252               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10253                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10254
10255         err = tg3_phy_probe(tp);
10256         if (err) {
10257                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10258                        pci_name(tp->pdev), err);
10259                 /* ... but do not return immediately ... */
10260         }
10261
10262         tg3_read_partno(tp);
10263
10264         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10265                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10266         } else {
10267                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10268                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10269                 else
10270                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10271         }
10272
10273         /* 5700 {AX,BX} chips have a broken status block link
10274          * change bit implementation, so we must use the
10275          * status register in those cases.
10276          */
10277         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10278                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10279         else
10280                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10281
10282         /* The led_ctrl is set during tg3_phy_probe, here we might
10283          * have to force the link status polling mechanism based
10284          * upon subsystem IDs.
10285          */
10286         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10287             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10288                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10289                                   TG3_FLAG_USE_LINKCHG_REG);
10290         }
10291
10292         /* For all SERDES we poll the MAC status register. */
10293         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10294                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10295         else
10296                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10297
10298         /* All chips before 5787 can get confused if TX buffers
10299          * straddle the 4GB address boundary in some cases.
10300          */
10301         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10302                 tp->dev->hard_start_xmit = tg3_start_xmit;
10303         else
10304                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10305
10306         tp->rx_offset = 2;
10307         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10308             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10309                 tp->rx_offset = 0;
10310
10311         /* By default, disable wake-on-lan.  User can change this
10312          * using ETHTOOL_SWOL.
10313          */
10314         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10315
10316         return err;
10317 }
10318
10319 #ifdef CONFIG_SPARC64
10320 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10321 {
10322         struct net_device *dev = tp->dev;
10323         struct pci_dev *pdev = tp->pdev;
10324         struct pcidev_cookie *pcp = pdev->sysdata;
10325
10326         if (pcp != NULL) {
10327                 int node = pcp->prom_node;
10328
10329                 if (prom_getproplen(node, "local-mac-address") == 6) {
10330                         prom_getproperty(node, "local-mac-address",
10331                                          dev->dev_addr, 6);
10332                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10333                         return 0;
10334                 }
10335         }
10336         return -ENODEV;
10337 }
10338
10339 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10340 {
10341         struct net_device *dev = tp->dev;
10342
10343         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10344         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10345         return 0;
10346 }
10347 #endif
10348
10349 static int __devinit tg3_get_device_address(struct tg3 *tp)
10350 {
10351         struct net_device *dev = tp->dev;
10352         u32 hi, lo, mac_offset;
10353
10354 #ifdef CONFIG_SPARC64
10355         if (!tg3_get_macaddr_sparc(tp))
10356                 return 0;
10357 #endif
10358
10359         mac_offset = 0x7c;
10360         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10361              !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) ||
10362             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10363                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10364                         mac_offset = 0xcc;
10365                 if (tg3_nvram_lock(tp))
10366                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10367                 else
10368                         tg3_nvram_unlock(tp);
10369         }
10370
10371         /* First try to get it from MAC address mailbox. */
10372         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
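              /* The upper 16 bits hold the ASCII signature "HK" (0x484b)
               * when the mailbox contains a valid MAC address.
               */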
10373         if ((hi >> 16) == 0x484b) {
10374                 dev->dev_addr[0] = (hi >>  8) & 0xff;
10375                 dev->dev_addr[1] = (hi >>  0) & 0xff;
10376
10377                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10378                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10379                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10380                 dev->dev_addr[4] = (lo >>  8) & 0xff;
10381                 dev->dev_addr[5] = (lo >>  0) & 0xff;
10382         }
10383         /* Next, try NVRAM. */
10384         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
10385                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10386                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10387                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
10388                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
10389                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
10390                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
10391                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
10392                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
10393         }
10394         /* Finally just fetch it out of the MAC control regs. */
10395         else {
10396                 hi = tr32(MAC_ADDR_0_HIGH);
10397                 lo = tr32(MAC_ADDR_0_LOW);
10398
10399                 dev->dev_addr[5] = lo & 0xff;
10400                 dev->dev_addr[4] = (lo >> 8) & 0xff;
10401                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10402                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10403                 dev->dev_addr[1] = hi & 0xff;
10404                 dev->dev_addr[0] = (hi >> 8) & 0xff;
10405         }
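        /* Illustrative example of the three byte layouts above, for the
         * made-up address 00:10:18:aa:bb:cc:
         *
         *   mailbox:   hi = 0x484b0010 ("HK" signature + bytes 0-1),
         *              lo = 0x18aabbcc
         *   NVRAM:     hi = 0x1000xxxx (low 16 bits unused),
         *              lo = 0xccbbaa18
         *   MAC regs:  hi = 0x00000010, lo = 0x18aabbcc
         */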
10406
10407         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10408 #ifdef CONFIG_SPARC64
10409                 if (!tg3_get_default_macaddr_sparc(tp))
10410                         return 0;
10411 #endif
10412                 return -EINVAL;
10413         }
10414         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10415         return 0;
10416 }
10417
10418 #define BOUNDARY_SINGLE_CACHELINE       1
10419 #define BOUNDARY_MULTI_CACHELINE        2
10420
10421 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10422 {
10423         int cacheline_size;
10424         u8 byte;
10425         int goal;
10426
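        /* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, so e.g. a raw
         * value of 16 means a 64-byte cache line.  A value of 0 (not
         * programmed) is treated as the largest boundary, 1024 bytes.
         */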
10427         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10428         if (byte == 0)
10429                 cacheline_size = 1024;
10430         else
10431                 cacheline_size = (int) byte * 4;
10432
10433         /* On 5703 and later chips, the boundary bits have no
10434          * effect.
10435          */
10436         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10437             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10438             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10439                 goto out;
10440
10441 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10442         goal = BOUNDARY_MULTI_CACHELINE;
10443 #else
10444 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10445         goal = BOUNDARY_SINGLE_CACHELINE;
10446 #else
10447         goal = 0;
10448 #endif
10449 #endif
10450
10451         if (!goal)
10452                 goto out;
10453
10454         /* PCI controllers on most RISC systems tend to disconnect
10455          * when a device tries to burst across a cache-line boundary.
10456          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10457          *
10458          * Unfortunately, for PCI-E there are only limited
10459          * write-side controls for this, and thus for reads
10460          * we will still get the disconnects.  We'll also waste
10461          * these PCI cycles for both read and write for chips
10462          * other than 5700 and 5701 which do not implement the
10463          * boundary bits.
10464          */
10465         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10466             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10467                 switch (cacheline_size) {
10468                 case 16:
10469                 case 32:
10470                 case 64:
10471                 case 128:
10472                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10473                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10474                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10475                         } else {
10476                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10477                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10478                         }
10479                         break;
10480
10481                 case 256:
10482                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10483                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10484                         break;
10485
10486                 default:
10487                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10488                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10489                         break;
10490                 }
10491         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10492                 switch (cacheline_size) {
10493                 case 16:
10494                 case 32:
10495                 case 64:
10496                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10497                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10498                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10499                                 break;
10500                         }
10501                         /* fallthrough */
10502                 case 128:
10503                 default:
10504                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10505                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10506                         break;
10507                 }
10508         } else {
10509                 switch (cacheline_size) {
10510                 case 16:
10511                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10512                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10513                                         DMA_RWCTRL_WRITE_BNDRY_16);
10514                                 break;
10515                         }
10516                         /* fallthrough */
10517                 case 32:
10518                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10519                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10520                                         DMA_RWCTRL_WRITE_BNDRY_32);
10521                                 break;
10522                         }
10523                         /* fallthrough */
10524                 case 64:
10525                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10526                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10527                                         DMA_RWCTRL_WRITE_BNDRY_64);
10528                                 break;
10529                         }
10530                         /* fallthrough */
10531                 case 128:
10532                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10533                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10534                                         DMA_RWCTRL_WRITE_BNDRY_128);
10535                                 break;
10536                         }
10537                         /* fallthrough */
10538                 case 256:
10539                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10540                                 DMA_RWCTRL_WRITE_BNDRY_256);
10541                         break;
10542                 case 512:
10543                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10544                                 DMA_RWCTRL_WRITE_BNDRY_512);
10545                         break;
10546                 case 1024:
10547                 default:
10548                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10549                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10550                         break;
10551                 }
10552         }
10553
10554 out:
10555         return val;
10556 }
10557
10558 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10559 {
10560         struct tg3_internal_buffer_desc test_desc;
10561         u32 sram_dma_descs;
10562         int i, ret;
10563
10564         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10565
10566         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10567         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10568         tw32(RDMAC_STATUS, 0);
10569         tw32(WDMAC_STATUS, 0);
10570
10571         tw32(BUFMGR_MODE, 0);
10572         tw32(FTQ_RESET, 0);
10573
10574         test_desc.addr_hi = ((u64) buf_dma) >> 32;
10575         test_desc.addr_lo = buf_dma & 0xffffffff;
10576         test_desc.nic_mbuf = 0x00002100;
10577         test_desc.len = size;
10578
10579         /*
10580          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
10581          * the *second* time the tg3 driver was getting loaded after an
10582          * initial scan.
10583          *
10584          * Broadcom tells me:
10585          *   ...the DMA engine is connected to the GRC block and a DMA
10586          *   reset may affect the GRC block in some unpredictable way...
10587          *   The behavior of resets to individual blocks has not been tested.
10588          *
10589          * Broadcom noted the GRC reset will also reset all sub-components.
10590          */
10591         if (to_device) {
10592                 test_desc.cqid_sqid = (13 << 8) | 2;
10593
10594                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10595                 udelay(40);
10596         } else {
10597                 test_desc.cqid_sqid = (16 << 8) | 7;
10598
10599                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10600                 udelay(40);
10601         }
10602         test_desc.flags = 0x00000005;
10603
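        /* Copy the descriptor, one 32-bit word at a time, into the NIC's
         * internal descriptor pool using the indirect memory window in PCI
         * config space (BASE_ADDR selects the SRAM address, DATA carries
         * the word).  The window is pointed back at offset 0 afterwards.
         */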
10604         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10605                 u32 val;
10606
10607                 val = *(((u32 *)&test_desc) + i);
10608                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10609                                        sram_dma_descs + (i * sizeof(u32)));
10610                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10611         }
10612         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10613
10614         if (to_device) {
10615                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10616         } else {
10617                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10618         }
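        /* Enqueueing the descriptor address on the high-priority DMA FTQ
         * kicks off the transfer.  The loop below then polls the matching
         * completion FIFO for up to ~4 ms (40 * 100 us) until it reports
         * the descriptor address back.
         */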
10619
10620         ret = -ENODEV;
10621         for (i = 0; i < 40; i++) {
10622                 u32 val;
10623
10624                 if (to_device)
10625                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10626                 else
10627                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10628                 if ((val & 0xffff) == sram_dma_descs) {
10629                         ret = 0;
10630                         break;
10631                 }
10632
10633                 udelay(100);
10634         }
10635
10636         return ret;
10637 }
10638
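/* Size of the host-side buffer used by the DMA read/write test below (8 KB). */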
10639 #define TEST_BUFFER_SIZE        0x2000
10640
10641 static int __devinit tg3_test_dma(struct tg3 *tp)
10642 {
10643         dma_addr_t buf_dma;
10644         u32 *buf, saved_dma_rwctrl;
10645         int ret;
10646
10647         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10648         if (!buf) {
10649                 ret = -ENOMEM;
10650                 goto out_nofree;
10651         }
10652
10653         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10654                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10655
10656         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
10657
10658         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10659                 /* DMA read watermark not used on PCIE */
10660                 tp->dma_rwctrl |= 0x00180000;
10661         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10662                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10663                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10664                         tp->dma_rwctrl |= 0x003f0000;
10665                 else
10666                         tp->dma_rwctrl |= 0x003f000f;
10667         } else {
10668                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10669                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10670                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10671
10672                         /* If the 5704 is behind the EPB bridge, we can
10673                          * do the less restrictive ONE_DMA workaround for
10674                          * better performance.
10675                          */
10676                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
10677                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10678                                 tp->dma_rwctrl |= 0x8000;
10679                         else if (ccval == 0x6 || ccval == 0x7)
10680                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10681
10682                         /* Set bit 23 to enable PCIX hw bug fix */
10683                         tp->dma_rwctrl |= 0x009f0000;
10684                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10685                         /* 5780 always in PCIX mode */
10686                         tp->dma_rwctrl |= 0x00144000;
10687                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10688                         /* 5714 always in PCIX mode */
10689                         tp->dma_rwctrl |= 0x00148000;
10690                 } else {
10691                         tp->dma_rwctrl |= 0x001b000f;
10692                 }
10693         }
10694
10695         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10696             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10697                 tp->dma_rwctrl &= 0xfffffff0;
10698
10699         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10700             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10701                 /* Remove this if it causes problems for some boards. */
10702                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10703
10704                 /* On 5700/5701 chips, we need to set this bit.
10705                  * Otherwise the chip will issue cacheline transactions
10706                  * to streamable DMA memory with not all the byte
10707                  * enables turned on.  This is an error on several
10708                  * RISC PCI controllers, in particular sparc64.
10709                  *
10710                  * On 5703/5704 chips, this bit has been reassigned
10711                  * a different meaning.  In particular, it is used
10712                  * on those chips to enable a PCI-X workaround.
10713                  */
10714                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10715         }
10716
10717         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10718
10719 #if 0
10720         /* Unneeded, already done by tg3_get_invariants.  */
10721         tg3_switch_clocks(tp);
10722 #endif
10723
10724         ret = 0;
10725         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10726             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10727                 goto out;
10728
10729         /* It is best to perform DMA test with maximum write burst size
10730          * to expose the 5700/5701 write DMA bug.
10731          */
10732         saved_dma_rwctrl = tp->dma_rwctrl;
10733         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10734         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10735
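        /* Test procedure: fill the buffer with an incrementing word
         * pattern, DMA it to the chip, DMA it back, and compare.  On a
         * mismatch, fall back to the most conservative 16-byte write
         * boundary and run the test once more before giving up.
         */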
10736         while (1) {
10737                 u32 *p = buf, i;
10738
10739                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10740                         p[i] = i;
10741
10742                 /* Send the buffer to the chip. */
10743                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10744                 if (ret) {
10745                         printk(KERN_ERR "tg3_test_dma(): write to the device failed (%d)\n", ret);
10746                         break;
10747                 }
10748
10749 #if 0
10750                 /* validate data reached card RAM correctly. */
10751                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10752                         u32 val;
10753                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10754                         if (le32_to_cpu(val) != p[i]) {
10755                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
10756                                 /* ret = -ENODEV here? */
10757                         }
10758                         p[i] = 0;
10759                 }
10760 #endif
10761                 /* Now read it back. */
10762                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10763                 if (ret) {
10764                         printk(KERN_ERR "tg3_test_dma(): read from the device failed (%d)\n", ret);
10765
10766                         break;
10767                 }
10768
10769                 /* Verify it. */
10770                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10771                         if (p[i] == i)
10772                                 continue;
10773
10774                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10775                             DMA_RWCTRL_WRITE_BNDRY_16) {
10776                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10777                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10778                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10779                                 break;
10780                         } else {
10781                                 printk(KERN_ERR "tg3_test_dma(): buffer corrupted on read back! (%d != %d)\n", p[i], i);
10782                                 ret = -ENODEV;
10783                                 goto out;
10784                         }
10785                 }
10786
10787                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10788                         /* Success. */
10789                         ret = 0;
10790                         break;
10791                 }
10792         }
10793         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10794             DMA_RWCTRL_WRITE_BNDRY_16) {
10795                 static struct pci_device_id dma_wait_state_chipsets[] = {
10796                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10797                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10798                         { },
10799                 };
10800
10801                 /* DMA test passed without adjusting DMA boundary,
10802                  * now look for chipsets that are known to expose the
10803                  * DMA bug without failing the test.
10804                  */
10805                 if (pci_dev_present(dma_wait_state_chipsets)) {
10806                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10807                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10808                 } else {
10809                         /* Safe to use the calculated DMA boundary. */
10810                         tp->dma_rwctrl = saved_dma_rwctrl;
10811                 }
10812
10813                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10814         }
10815
10816 out:
10817         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
10818 out_nofree:
10819         return ret;
10820 }
10821
10822 static void __devinit tg3_init_link_config(struct tg3 *tp)
10823 {
10824         tp->link_config.advertising =
10825                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10826                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10827                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
10828                  ADVERTISED_Autoneg | ADVERTISED_MII);
10829         tp->link_config.speed = SPEED_INVALID;
10830         tp->link_config.duplex = DUPLEX_INVALID;
10831         tp->link_config.autoneg = AUTONEG_ENABLE;
10832         netif_carrier_off(tp->dev);
10833         tp->link_config.active_speed = SPEED_INVALID;
10834         tp->link_config.active_duplex = DUPLEX_INVALID;
10835         tp->link_config.phy_is_low_power = 0;
10836         tp->link_config.orig_speed = SPEED_INVALID;
10837         tp->link_config.orig_duplex = DUPLEX_INVALID;
10838         tp->link_config.orig_autoneg = AUTONEG_INVALID;
10839 }
10840
10841 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
10842 {
10843         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10844                 tp->bufmgr_config.mbuf_read_dma_low_water =
10845                         DEFAULT_MB_RDMA_LOW_WATER_5705;
10846                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10847                         DEFAULT_MB_MACRX_LOW_WATER_5705;
10848                 tp->bufmgr_config.mbuf_high_water =
10849                         DEFAULT_MB_HIGH_WATER_5705;
10850
10851                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10852                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
10853                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10854                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
10855                 tp->bufmgr_config.mbuf_high_water_jumbo =
10856                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
10857         } else {
10858                 tp->bufmgr_config.mbuf_read_dma_low_water =
10859                         DEFAULT_MB_RDMA_LOW_WATER;
10860                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10861                         DEFAULT_MB_MACRX_LOW_WATER;
10862                 tp->bufmgr_config.mbuf_high_water =
10863                         DEFAULT_MB_HIGH_WATER;
10864
10865                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10866                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
10867                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10868                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
10869                 tp->bufmgr_config.mbuf_high_water_jumbo =
10870                         DEFAULT_MB_HIGH_WATER_JUMBO;
10871         }
10872
10873         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
10874         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
10875 }
10876
10877 static char * __devinit tg3_phy_string(struct tg3 *tp)
10878 {
10879         switch (tp->phy_id & PHY_ID_MASK) {
10880         case PHY_ID_BCM5400:    return "5400";
10881         case PHY_ID_BCM5401:    return "5401";
10882         case PHY_ID_BCM5411:    return "5411";
10883         case PHY_ID_BCM5701:    return "5701";
10884         case PHY_ID_BCM5703:    return "5703";
10885         case PHY_ID_BCM5704:    return "5704";
10886         case PHY_ID_BCM5705:    return "5705";
10887         case PHY_ID_BCM5750:    return "5750";
10888         case PHY_ID_BCM5752:    return "5752";
10889         case PHY_ID_BCM5714:    return "5714";
10890         case PHY_ID_BCM5780:    return "5780";
10891         case PHY_ID_BCM5787:    return "5787";
10892         case PHY_ID_BCM8002:    return "8002/serdes";
10893         case 0:                 return "serdes";
10894         default:                return "unknown";
10895         }
10896 }
10897
10898 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
10899 {
10900         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10901                 strcpy(str, "PCI Express");
10902                 return str;
10903         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
10904                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
10905
10906                 strcpy(str, "PCIX:");
10907
10908                 if ((clock_ctrl == 7) ||
10909                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
10910                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
10911                         strcat(str, "133MHz");
10912                 else if (clock_ctrl == 0)
10913                         strcat(str, "33MHz");
10914                 else if (clock_ctrl == 2)
10915                         strcat(str, "50MHz");
10916                 else if (clock_ctrl == 4)
10917                         strcat(str, "66MHz");
10918                 else if (clock_ctrl == 6)
10919                         strcat(str, "100MHz");
10920         } else {
10921                 strcpy(str, "PCI:");
10922                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
10923                         strcat(str, "66MHz");
10924                 else
10925                         strcat(str, "33MHz");
10926         }
10927         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
10928                 strcat(str, ":32-bit");
10929         else
10930                 strcat(str, ":64-bit");
10931         return str;
10932 }
10933
10934 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
10935 {
10936         struct pci_dev *peer;
10937         unsigned int func, devnr = tp->pdev->devfn & ~7;
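        /* devfn packs the PCI device number in bits 7:3 and the function
         * number in bits 2:0, so masking off the low three bits lets us
         * walk all eight functions of our own device looking for the
         * companion port.
         */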
10938
10939         for (func = 0; func < 8; func++) {
10940                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
10941                 if (peer && peer != tp->pdev)
10942                         break;
10943                 pci_dev_put(peer);
10944         }
10945         /* 5704 can be configured in single-port mode, set peer to
10946          * tp->pdev in that case.
10947          */
10948         if (!peer) {
10949                 peer = tp->pdev;
10950                 return peer;
10951         }
10952
10953         /*
10954          * We don't need to keep the refcount elevated; there's no way
10955          * to remove one half of this device without removing the other
10956          */
10957         pci_dev_put(peer);
10958
10959         return peer;
10960 }
10961
10962 static void __devinit tg3_init_coal(struct tg3 *tp)
10963 {
10964         struct ethtool_coalesce *ec = &tp->coal;
10965
10966         memset(ec, 0, sizeof(*ec));
10967         ec->cmd = ETHTOOL_GCOALESCE;
10968         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
10969         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
10970         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
10971         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
10972         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
10973         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
10974         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
10975         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
10976         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
10977
10978         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
10979                                  HOSTCC_MODE_CLRTICK_TXBD)) {
10980                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
10981                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
10982                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
10983                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
10984         }
10985
10986         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10987                 ec->rx_coalesce_usecs_irq = 0;
10988                 ec->tx_coalesce_usecs_irq = 0;
10989                 ec->stats_block_coalesce_usecs = 0;
10990         }
10991 }
10992
10993 static int __devinit tg3_init_one(struct pci_dev *pdev,
10994                                   const struct pci_device_id *ent)
10995 {
10996         static int tg3_version_printed = 0;
10997         unsigned long tg3reg_base, tg3reg_len;
10998         struct net_device *dev;
10999         struct tg3 *tp;
11000         int i, err, pm_cap;
11001         char str[40];
11002         u64 dma_mask, persist_dma_mask;
11003
11004         if (tg3_version_printed++ == 0)
11005                 printk(KERN_INFO "%s", version);
11006
11007         err = pci_enable_device(pdev);
11008         if (err) {
11009                 printk(KERN_ERR PFX "Cannot enable PCI device, "
11010                        "aborting.\n");
11011                 return err;
11012         }
11013
11014         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11015                 printk(KERN_ERR PFX "Cannot find proper PCI device "
11016                        "base address, aborting.\n");
11017                 err = -ENODEV;
11018                 goto err_out_disable_pdev;
11019         }
11020
11021         err = pci_request_regions(pdev, DRV_MODULE_NAME);
11022         if (err) {
11023                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11024                        "aborting.\n");
11025                 goto err_out_disable_pdev;
11026         }
11027
11028         pci_set_master(pdev);
11029
11030         /* Find power-management capability. */
11031         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11032         if (pm_cap == 0) {
11033                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11034                        "aborting.\n");
11035                 err = -EIO;
11036                 goto err_out_free_res;
11037         }
11038
11039         tg3reg_base = pci_resource_start(pdev, 0);
11040         tg3reg_len = pci_resource_len(pdev, 0);
11041
11042         dev = alloc_etherdev(sizeof(*tp));
11043         if (!dev) {
11044                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11045                 err = -ENOMEM;
11046                 goto err_out_free_res;
11047         }
11048
11049         SET_MODULE_OWNER(dev);
11050         SET_NETDEV_DEV(dev, &pdev->dev);
11051
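        /* NETIF_F_LLTX: the driver does its own transmit locking
         * (tp->tx_lock) instead of relying on the core's xmit lock.
         */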
11052         dev->features |= NETIF_F_LLTX;
11053 #if TG3_VLAN_TAG_USED
11054         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11055         dev->vlan_rx_register = tg3_vlan_rx_register;
11056         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
11057 #endif
11058
11059         tp = netdev_priv(dev);
11060         tp->pdev = pdev;
11061         tp->dev = dev;
11062         tp->pm_cap = pm_cap;
11063         tp->mac_mode = TG3_DEF_MAC_MODE;
11064         tp->rx_mode = TG3_DEF_RX_MODE;
11065         tp->tx_mode = TG3_DEF_TX_MODE;
11066         tp->mi_mode = MAC_MI_MODE_BASE;
11067         if (tg3_debug > 0)
11068                 tp->msg_enable = tg3_debug;
11069         else
11070                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11071
11072         /* The word/byte swap controls here control register access byte
11073          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
11074          * setting below.
11075          */
11076         tp->misc_host_ctrl =
11077                 MISC_HOST_CTRL_MASK_PCI_INT |
11078                 MISC_HOST_CTRL_WORD_SWAP |
11079                 MISC_HOST_CTRL_INDIR_ACCESS |
11080                 MISC_HOST_CTRL_PCISTATE_RW;
11081
11082         /* The NONFRM (non-frame) byte/word swap controls take effect
11083          * on descriptor entries, anything which isn't packet data.
11084          *
11085          * The StrongARM chips on the board (one for tx, one for rx)
11086          * are running in big-endian mode.
11087          */
11088         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11089                         GRC_MODE_WSWAP_NONFRM_DATA);
11090 #ifdef __BIG_ENDIAN
11091         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11092 #endif
11093         spin_lock_init(&tp->lock);
11094         spin_lock_init(&tp->tx_lock);
11095         spin_lock_init(&tp->indirect_lock);
11096         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
11097
11098         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11099         if (!tp->regs) {
11100                 printk(KERN_ERR PFX "Cannot map device registers, "
11101                        "aborting.\n");
11102                 err = -ENOMEM;
11103                 goto err_out_free_dev;
11104         }
11105
11106         tg3_init_link_config(tp);
11107
11108         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11109         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11110         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11111
11112         dev->open = tg3_open;
11113         dev->stop = tg3_close;
11114         dev->get_stats = tg3_get_stats;
11115         dev->set_multicast_list = tg3_set_rx_mode;
11116         dev->set_mac_address = tg3_set_mac_addr;
11117         dev->do_ioctl = tg3_ioctl;
11118         dev->tx_timeout = tg3_tx_timeout;
11119         dev->poll = tg3_poll;
11120         dev->ethtool_ops = &tg3_ethtool_ops;
11121         dev->weight = 64;
11122         dev->watchdog_timeo = TG3_TX_TIMEOUT;
11123         dev->change_mtu = tg3_change_mtu;
11124         dev->irq = pdev->irq;
11125 #ifdef CONFIG_NET_POLL_CONTROLLER
11126         dev->poll_controller = tg3_poll_controller;
11127 #endif
11128
11129         err = tg3_get_invariants(tp);
11130         if (err) {
11131                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11132                        "aborting.\n");
11133                 goto err_out_iounmap;
11134         }
11135
11136         /* The EPB bridge inside 5714, 5715, and 5780 and any
11137          * device behind the EPB cannot support DMA addresses > 40-bit.
11138          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11139          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11140          * do DMA address check in tg3_start_xmit().
11141          */
11142         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11143                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11144         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11145                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11146 #ifdef CONFIG_HIGHMEM
11147                 dma_mask = DMA_64BIT_MASK;
11148 #endif
11149         } else
11150                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11151
11152         /* Configure DMA attributes. */
11153         if (dma_mask > DMA_32BIT_MASK) {
11154                 err = pci_set_dma_mask(pdev, dma_mask);
11155                 if (!err) {
11156                         dev->features |= NETIF_F_HIGHDMA;
11157                         err = pci_set_consistent_dma_mask(pdev,
11158                                                           persist_dma_mask);
11159                         if (err < 0) {
11160                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11161                                        "DMA for consistent allocations\n");
11162                                 goto err_out_iounmap;
11163                         }
11164                 }
11165         }
11166         if (err || dma_mask == DMA_32BIT_MASK) {
11167                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11168                 if (err) {
11169                         printk(KERN_ERR PFX "No usable DMA configuration, "
11170                                "aborting.\n");
11171                         goto err_out_iounmap;
11172                 }
11173         }
11174
11175         tg3_init_bufmgr_config(tp);
11176
11177 #if TG3_TSO_SUPPORT != 0
11178         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11179                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11180         }
11181         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11182             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11183             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11184             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11185                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11186         } else {
11187                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11188         }
11189
11190         /* TSO is on by default on chips that support hardware TSO.
11191          * Firmware TSO on older chips gives lower performance, so it
11192          * is off by default, but can be enabled using ethtool.
11193          */
11194         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
11195                 dev->features |= NETIF_F_TSO;
11196
11197 #endif
11198
11199         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11200             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11201             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11202                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11203                 tp->rx_pending = 63;
11204         }
11205
11206         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11207             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11208                 tp->pdev_peer = tg3_find_peer(tp);
11209
11210         err = tg3_get_device_address(tp);
11211         if (err) {
11212                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11213                        "aborting.\n");
11214                 goto err_out_iounmap;
11215         }
11216
11217         /*
11218          * Reset the chip in case the UNDI or EFI driver did not shut
11219          * down the DMA engine.  The DMA self test will enable the WDMAC
11220          * and we'll see (spurious) pending DMA on the PCI bus at that point.
11221          */
11222         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11223             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11224                 pci_save_state(tp->pdev);
11225                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11226                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11227         }
11228
11229         err = tg3_test_dma(tp);
11230         if (err) {
11231                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11232                 goto err_out_iounmap;
11233         }
11234
11235         /* Tigon3 can offload checksums for IPv4 only, and some chips
11236          * have buggy checksumming.
11237          */
11238         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11239                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
11240                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11241         } else
11242                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11243
11244         /* flow control autonegotiation is default behavior */
11245         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11246
11247         tg3_init_coal(tp);
11248
11249         /* Now that we have fully setup the chip, save away a snapshot
11250          * of the PCI config space.  We need to restore this after
11251          * GRC_MISC_CFG core clock resets and some resume events.
11252          */
11253         pci_save_state(tp->pdev);
11254
11255         err = register_netdev(dev);
11256         if (err) {
11257                 printk(KERN_ERR PFX "Cannot register net device, "
11258                        "aborting.\n");
11259                 goto err_out_iounmap;
11260         }
11261
11262         pci_set_drvdata(pdev, dev);
11263
11264         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11265                dev->name,
11266                tp->board_part_number,
11267                tp->pci_chip_rev_id,
11268                tg3_phy_string(tp),
11269                tg3_bus_string(tp, str),
11270                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11271
11272         for (i = 0; i < 6; i++)
11273                 printk("%2.2x%c", dev->dev_addr[i],
11274                        i == 5 ? '\n' : ':');
11275
11276         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11277                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11278                "TSOcap[%d]\n",
11279                dev->name,
11280                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11281                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11282                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11283                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11284                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11285                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11286                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11287         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11288                dev->name, tp->dma_rwctrl,
11289                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11290                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
11291
11292         return 0;
11293
11294 err_out_iounmap:
11295         if (tp->regs) {
11296                 iounmap(tp->regs);
11297                 tp->regs = NULL;
11298         }
11299
11300 err_out_free_dev:
11301         free_netdev(dev);
11302
11303 err_out_free_res:
11304         pci_release_regions(pdev);
11305
11306 err_out_disable_pdev:
11307         pci_disable_device(pdev);
11308         pci_set_drvdata(pdev, NULL);
11309         return err;
11310 }
11311
11312 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11313 {
11314         struct net_device *dev = pci_get_drvdata(pdev);
11315
11316         if (dev) {
11317                 struct tg3 *tp = netdev_priv(dev);
11318
11319                 flush_scheduled_work();
11320                 unregister_netdev(dev);
11321                 if (tp->regs) {
11322                         iounmap(tp->regs);
11323                         tp->regs = NULL;
11324                 }
11325                 free_netdev(dev);
11326                 pci_release_regions(pdev);
11327                 pci_disable_device(pdev);
11328                 pci_set_drvdata(pdev, NULL);
11329         }
11330 }
11331
11332 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11333 {
11334         struct net_device *dev = pci_get_drvdata(pdev);
11335         struct tg3 *tp = netdev_priv(dev);
11336         int err;
11337
11338         if (!netif_running(dev))
11339                 return 0;
11340
11341         flush_scheduled_work();
11342         tg3_netif_stop(tp);
11343
11344         del_timer_sync(&tp->timer);
11345
11346         tg3_full_lock(tp, 1);
11347         tg3_disable_ints(tp);
11348         tg3_full_unlock(tp);
11349
11350         netif_device_detach(dev);
11351
11352         tg3_full_lock(tp, 0);
11353         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11354         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
11355         tg3_full_unlock(tp);
11356
11357         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
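        /* If entering the low-power state failed, bring the hardware and
         * the interface back up so the device stays usable; the error is
         * still propagated to the PM core.
         */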
11358         if (err) {
11359                 tg3_full_lock(tp, 0);
11360
11361                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11362                 tg3_init_hw(tp);
11363
11364                 tp->timer.expires = jiffies + tp->timer_offset;
11365                 add_timer(&tp->timer);
11366
11367                 netif_device_attach(dev);
11368                 tg3_netif_start(tp);
11369
11370                 tg3_full_unlock(tp);
11371         }
11372
11373         return err;
11374 }
11375
11376 static int tg3_resume(struct pci_dev *pdev)
11377 {
11378         struct net_device *dev = pci_get_drvdata(pdev);
11379         struct tg3 *tp = netdev_priv(dev);
11380         int err;
11381
11382         if (!netif_running(dev))
11383                 return 0;
11384
11385         pci_restore_state(tp->pdev);
11386
11387         err = tg3_set_power_state(tp, PCI_D0);
11388         if (err)
11389                 return err;
11390
11391         netif_device_attach(dev);
11392
11393         tg3_full_lock(tp, 0);
11394
11395         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11396         tg3_init_hw(tp);
11397
11398         tp->timer.expires = jiffies + tp->timer_offset;
11399         add_timer(&tp->timer);
11400
11401         tg3_netif_start(tp);
11402
11403         tg3_full_unlock(tp);
11404
11405         return 0;
11406 }
11407
11408 static struct pci_driver tg3_driver = {
11409         .name           = DRV_MODULE_NAME,
11410         .id_table       = tg3_pci_tbl,
11411         .probe          = tg3_init_one,
11412         .remove         = __devexit_p(tg3_remove_one),
11413         .suspend        = tg3_suspend,
11414         .resume         = tg3_resume
11415 };
11416
11417 static int __init tg3_init(void)
11418 {
11419         return pci_module_init(&tg3_driver);
11420 }
11421
11422 static void __exit tg3_cleanup(void)
11423 {
11424         pci_unregister_driver(&tg3_driver);
11425 }
11426
11427 module_init(tg3_init);
11428 module_exit(tg3_cleanup);