1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mii.h>
36 #include <linux/if_vlan.h>
37 #include <linux/ip.h>
38 #include <linux/tcp.h>
39 #include <linux/workqueue.h>
40 #include <linux/prefetch.h>
41 #include <linux/dma-mapping.h>
42
43 #include <net/checksum.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC64
51 #include <asm/idprom.h>
52 #include <asm/oplib.h>
53 #include <asm/pbm.h>
54 #endif
55
56 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
57 #define TG3_VLAN_TAG_USED 1
58 #else
59 #define TG3_VLAN_TAG_USED 0
60 #endif
61
62 #ifdef NETIF_F_TSO
63 #define TG3_TSO_SUPPORT 1
64 #else
65 #define TG3_TSO_SUPPORT 0
66 #endif
67
68 #include "tg3.h"
69
70 #define DRV_MODULE_NAME         "tg3"
71 #define PFX DRV_MODULE_NAME     ": "
72 #define DRV_MODULE_VERSION      "3.49"
73 #define DRV_MODULE_RELDATE      "Feb 2, 2006"
74
75 #define TG3_DEF_MAC_MODE        0
76 #define TG3_DEF_RX_MODE         0
77 #define TG3_DEF_TX_MODE         0
78 #define TG3_DEF_MSG_ENABLE        \
79         (NETIF_MSG_DRV          | \
80          NETIF_MSG_PROBE        | \
81          NETIF_MSG_LINK         | \
82          NETIF_MSG_TIMER        | \
83          NETIF_MSG_IFDOWN       | \
84          NETIF_MSG_IFUP         | \
85          NETIF_MSG_RX_ERR       | \
86          NETIF_MSG_TX_ERR)
87
88 /* length of time before we decide the hardware is borked,
89  * and dev->tx_timeout() should be called to fix the problem
90  */
91 #define TG3_TX_TIMEOUT                  (5 * HZ)
92
93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU                     60
95 #define TG3_MAX_MTU(tp) \
96         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97
98 /* These numbers seem to be hard coded in the NIC firmware somehow.
99  * You can't change the ring sizes, but you can change where you place
100  * them in the NIC onboard memory.
101  */
102 #define TG3_RX_RING_SIZE                512
103 #define TG3_DEF_RX_RING_PENDING         200
104 #define TG3_RX_JUMBO_RING_SIZE          256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
106
107 /* Do not place this n-ring entries value into the tp struct itself,
108  * we really want to expose these constants to GCC so that modulo et
109  * al.  operations are done with shifts and masks instead of with
110  * hw multiply/modulo instructions.  Another solution would be to
111  * replace things like '% foo' with '& (foo - 1)'.
112  */
113 #define TG3_RX_RCB_RING_SIZE(tp)        \
114         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
115
116 #define TG3_TX_RING_SIZE                512
117 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
118
119 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
120                                  TG3_RX_RING_SIZE)
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122                                  TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124                                    TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
126                                  TG3_TX_RING_SIZE)
127 #define TX_BUFFS_AVAIL(TP)                                              \
128         ((TP)->tx_pending -                                             \
129          (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
130 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
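/* Illustration only (not part of the driver): because the ring sizes above
 * are powers of two, the '& (size - 1)' form used by TX_BUFFS_AVAIL() and
 * NEXT_TX() is equivalent to a modulo, so the compiler can emit a single
 * AND instead of a hardware divide.  For example, with
 * TG3_TX_RING_SIZE == 512:
 *
 *     (511 + 1) % 512          ==  0
 *     (511 + 1) & (512 - 1)    ==  0
 *
 * i.e. the index wraps back to the start of the ring either way.
 */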
131
132 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
134
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
137
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
141 #define TG3_NUM_TEST            6
142
143 static char version[] __devinitdata =
144         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION);
150
151 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
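/* Usage sketch (not part of the driver): tg3_debug is interpreted as the
 * standard NETIF_MSG_* bitmask from <linux/netdevice.h>.  Assuming the usual
 * bit values (NETIF_MSG_DRV = 0x0001, NETIF_MSG_PROBE = 0x0002,
 * NETIF_MSG_LINK = 0x0004), loading the module with
 *
 *     modprobe tg3 tg3_debug=7
 *
 * would enable driver, probe and link messages, while the default of -1
 * selects TG3_DEF_MSG_ENABLE defined above.
 */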
154
155 static struct pci_device_id tg3_pci_tbl[] = {
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
241           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
243           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
245           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
246         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
247           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
248         { 0, }
249 };
250
251 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
252
253 static struct {
254         const char string[ETH_GSTRING_LEN];
255 } ethtool_stats_keys[TG3_NUM_STATS] = {
256         { "rx_octets" },
257         { "rx_fragments" },
258         { "rx_ucast_packets" },
259         { "rx_mcast_packets" },
260         { "rx_bcast_packets" },
261         { "rx_fcs_errors" },
262         { "rx_align_errors" },
263         { "rx_xon_pause_rcvd" },
264         { "rx_xoff_pause_rcvd" },
265         { "rx_mac_ctrl_rcvd" },
266         { "rx_xoff_entered" },
267         { "rx_frame_too_long_errors" },
268         { "rx_jabbers" },
269         { "rx_undersize_packets" },
270         { "rx_in_length_errors" },
271         { "rx_out_length_errors" },
272         { "rx_64_or_less_octet_packets" },
273         { "rx_65_to_127_octet_packets" },
274         { "rx_128_to_255_octet_packets" },
275         { "rx_256_to_511_octet_packets" },
276         { "rx_512_to_1023_octet_packets" },
277         { "rx_1024_to_1522_octet_packets" },
278         { "rx_1523_to_2047_octet_packets" },
279         { "rx_2048_to_4095_octet_packets" },
280         { "rx_4096_to_8191_octet_packets" },
281         { "rx_8192_to_9022_octet_packets" },
282
283         { "tx_octets" },
284         { "tx_collisions" },
285
286         { "tx_xon_sent" },
287         { "tx_xoff_sent" },
288         { "tx_flow_control" },
289         { "tx_mac_errors" },
290         { "tx_single_collisions" },
291         { "tx_mult_collisions" },
292         { "tx_deferred" },
293         { "tx_excessive_collisions" },
294         { "tx_late_collisions" },
295         { "tx_collide_2times" },
296         { "tx_collide_3times" },
297         { "tx_collide_4times" },
298         { "tx_collide_5times" },
299         { "tx_collide_6times" },
300         { "tx_collide_7times" },
301         { "tx_collide_8times" },
302         { "tx_collide_9times" },
303         { "tx_collide_10times" },
304         { "tx_collide_11times" },
305         { "tx_collide_12times" },
306         { "tx_collide_13times" },
307         { "tx_collide_14times" },
308         { "tx_collide_15times" },
309         { "tx_ucast_packets" },
310         { "tx_mcast_packets" },
311         { "tx_bcast_packets" },
312         { "tx_carrier_sense_errors" },
313         { "tx_discards" },
314         { "tx_errors" },
315
316         { "dma_writeq_full" },
317         { "dma_write_prioq_full" },
318         { "rxbds_empty" },
319         { "rx_discards" },
320         { "rx_errors" },
321         { "rx_threshold_hit" },
322
323         { "dma_readq_full" },
324         { "dma_read_prioq_full" },
325         { "tx_comp_queue_full" },
326
327         { "ring_set_send_prod_index" },
328         { "ring_status_update" },
329         { "nic_irqs" },
330         { "nic_avoided_irqs" },
331         { "nic_tx_threshold_hit" }
332 };
333
334 static struct {
335         const char string[ETH_GSTRING_LEN];
336 } ethtool_test_keys[TG3_NUM_TEST] = {
337         { "nvram test     (online) " },
338         { "link test      (online) " },
339         { "register test  (offline)" },
340         { "memory test    (offline)" },
341         { "loopback test  (offline)" },
342         { "interrupt test (offline)" },
343 };
344
345 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
346 {
347         writel(val, tp->regs + off);
348 }
349
350 static u32 tg3_read32(struct tg3 *tp, u32 off)
351 {
352         return readl(tp->regs + off);
353 }
354
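/* Indirect register access: instead of a memory-mapped write, the target
 * register offset is written to the TG3PCI_REG_BASE_ADDR window in PCI
 * config space and the data then moves through TG3PCI_REG_DATA.  The
 * indirect_lock keeps the two config cycles atomic with respect to other
 * indirect accesses.
 */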
355 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
356 {
357         unsigned long flags;
358
359         spin_lock_irqsave(&tp->indirect_lock, flags);
360         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
361         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
362         spin_unlock_irqrestore(&tp->indirect_lock, flags);
363 }
364
365 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
366 {
367         writel(val, tp->regs + off);
368         readl(tp->regs + off);
369 }
370
371 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
372 {
373         unsigned long flags;
374         u32 val;
375
376         spin_lock_irqsave(&tp->indirect_lock, flags);
377         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
378         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
379         spin_unlock_irqrestore(&tp->indirect_lock, flags);
380         return val;
381 }
382
383 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
384 {
385         unsigned long flags;
386
387         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
388                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
389                                        TG3_64BIT_REG_LOW, val);
390                 return;
391         }
392         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
393                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
394                                        TG3_64BIT_REG_LOW, val);
395                 return;
396         }
397
398         spin_lock_irqsave(&tp->indirect_lock, flags);
399         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
400         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
401         spin_unlock_irqrestore(&tp->indirect_lock, flags);
402
403         /* In indirect mode when disabling interrupts, we also need
404          * to clear the interrupt bit in the GRC local ctrl register.
405          */
406         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
407             (val == 0x1)) {
408                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
409                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
410         }
411 }
412
413 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
414 {
415         unsigned long flags;
416         u32 val;
417
418         spin_lock_irqsave(&tp->indirect_lock, flags);
419         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
420         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
421         spin_unlock_irqrestore(&tp->indirect_lock, flags);
422         return val;
423 }
424
425 /* usec_wait specifies the wait time in usec when writing to certain registers
426  * where it is unsafe to read back the register without some delay.
427  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
428  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
429  */
430 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
431 {
432         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
433             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
434                 /* Non-posted methods */
435                 tp->write32(tp, off, val);
436         else {
437                 /* Posted method */
438                 tg3_write32(tp, off, val);
439                 if (usec_wait)
440                         udelay(usec_wait);
441                 tp->read32(tp, off);
442         }
443         /* Wait again after the read for the posted method to guarantee that
444          * the wait time is met.
445          */
446         if (usec_wait)
447                 udelay(usec_wait);
448 }
449
450 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
451 {
452         tp->write32_mbox(tp, off, val);
453         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
454             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
455                 tp->read32_mbox(tp, off);
456 }
457
458 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
459 {
460         void __iomem *mbox = tp->regs + off;
461         writel(val, mbox);
462         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
463                 writel(val, mbox);
464         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
465                 readl(mbox);
466 }
467
468 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
469 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
470 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
471 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
472 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
473
474 #define tw32(reg,val)           tp->write32(tp, reg, val)
475 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
476 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
477 #define tr32(reg)               tp->read32(tp, reg)
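/* Usage sketch (illustration only, not a new code path): a caller that must
 * honour the post-write delay documented above for GRC_LOCAL_CTRL would
 * typically do
 *
 *     tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1, 100);
 *
 * which expands to _tw32_flush() with a 100 usec wait, whereas plain tw32()
 * performs the write with no read-back and no delay.
 */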
478
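/* NIC on-board memory (SRAM) is reached through a second config-space
 * window: the SRAM offset goes to TG3PCI_MEM_WIN_BASE_ADDR and the data
 * moves through TG3PCI_MEM_WIN_DATA.  The window base is written back to
 * zero afterwards so later accesses see a predictable window.
 */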
479 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
480 {
481         unsigned long flags;
482
483         spin_lock_irqsave(&tp->indirect_lock, flags);
484         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
485         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
486
487         /* Always leave this as zero. */
488         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
489         spin_unlock_irqrestore(&tp->indirect_lock, flags);
490 }
491
492 static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
493 {
494         /* If no workaround is needed, write to mem space directly */
495         if (tp->write32 != tg3_write_indirect_reg32)
496                 tw32(NIC_SRAM_WIN_BASE + off, val);
497         else
498                 tg3_write_mem(tp, off, val);
499 }
500
501 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
502 {
503         unsigned long flags;
504
505         spin_lock_irqsave(&tp->indirect_lock, flags);
506         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
507         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
508
509         /* Always leave this as zero. */
510         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
511         spin_unlock_irqrestore(&tp->indirect_lock, flags);
512 }
513
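/* Interrupt masking: interrupts are disabled by setting the PCI interrupt
 * mask bit in MISC_HOST_CTRL and writing 1 to interrupt mailbox 0;
 * tg3_enable_ints() reverses both steps and rearms the mailbox with the
 * last status tag so already-completed work is not signalled twice.
 */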
514 static void tg3_disable_ints(struct tg3 *tp)
515 {
516         tw32(TG3PCI_MISC_HOST_CTRL,
517              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
518         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
519 }
520
521 static inline void tg3_cond_int(struct tg3 *tp)
522 {
523         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
524             (tp->hw_status->status & SD_STATUS_UPDATED))
525                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
526 }
527
528 static void tg3_enable_ints(struct tg3 *tp)
529 {
530         tp->irq_sync = 0;
531         wmb();
532
533         tw32(TG3PCI_MISC_HOST_CTRL,
534              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
535         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
536                        (tp->last_tag << 24));
537         tg3_cond_int(tp);
538 }
539
540 static inline unsigned int tg3_has_work(struct tg3 *tp)
541 {
542         struct tg3_hw_status *sblk = tp->hw_status;
543         unsigned int work_exists = 0;
544
545         /* check for phy events */
546         if (!(tp->tg3_flags &
547               (TG3_FLAG_USE_LINKCHG_REG |
548                TG3_FLAG_POLL_SERDES))) {
549                 if (sblk->status & SD_STATUS_LINK_CHG)
550                         work_exists = 1;
551         }
552         /* check for RX/TX work to do */
553         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
554             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
555                 work_exists = 1;
556
557         return work_exists;
558 }
559
560 /* tg3_restart_ints
561  *  similar to tg3_enable_ints, but it accurately determines whether there
562  *  is new work pending and can return without flushing the PIO write
563  *  which re-enables interrupts.
564  */
565 static void tg3_restart_ints(struct tg3 *tp)
566 {
567         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
568                      tp->last_tag << 24);
569         mmiowb();
570
571         /* When doing tagged status, this work check is unnecessary.
572          * The last_tag we write above tells the chip which piece of
573          * work we've completed.
574          */
575         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
576             tg3_has_work(tp))
577                 tw32(HOSTCC_MODE, tp->coalesce_mode |
578                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
579 }
580
581 static inline void tg3_netif_stop(struct tg3 *tp)
582 {
583         tp->dev->trans_start = jiffies; /* prevent tx timeout */
584         netif_poll_disable(tp->dev);
585         netif_tx_disable(tp->dev);
586 }
587
588 static inline void tg3_netif_start(struct tg3 *tp)
589 {
590         netif_wake_queue(tp->dev);
591         /* NOTE: unconditional netif_wake_queue is only appropriate
592          * so long as all callers are assured to have free tx slots
593          * (such as after tg3_init_hw)
594          */
595         netif_poll_enable(tp->dev);
596         tp->hw_status->status |= SD_STATUS_UPDATED;
597         tg3_enable_ints(tp);
598 }
599
600 static void tg3_switch_clocks(struct tg3 *tp)
601 {
602         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
603         u32 orig_clock_ctrl;
604
605         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
606                 return;
607
608         orig_clock_ctrl = clock_ctrl;
609         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
610                        CLOCK_CTRL_CLKRUN_OENABLE |
611                        0x1f);
612         tp->pci_clock_ctrl = clock_ctrl;
613
614         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
615                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
616                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
617                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
618                 }
619         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
620                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
621                             clock_ctrl |
622                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
623                             40);
624                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
625                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
626                             40);
627         }
628         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
629 }
630
631 #define PHY_BUSY_LOOPS  5000
632
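/* PHY (MII management) access: tg3_readphy()/tg3_writephy() below build a
 * management frame in MAC_MI_COM (PHY address, register number, command)
 * and then poll MI_COM_BUSY for up to PHY_BUSY_LOOPS iterations.  A typical
 * caller reads a standard MII register, e.g. (illustration only, local
 * variables are stand-ins):
 *
 *     u32 bmsr;
 *     int link_up;
 *
 *     if (!tg3_readphy(tp, MII_BMSR, &bmsr))
 *             link_up = (bmsr & BMSR_LSTATUS) != 0;
 */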
633 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
634 {
635         u32 frame_val;
636         unsigned int loops;
637         int ret;
638
639         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
640                 tw32_f(MAC_MI_MODE,
641                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
642                 udelay(80);
643         }
644
645         *val = 0x0;
646
647         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
648                       MI_COM_PHY_ADDR_MASK);
649         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
650                       MI_COM_REG_ADDR_MASK);
651         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
652         
653         tw32_f(MAC_MI_COM, frame_val);
654
655         loops = PHY_BUSY_LOOPS;
656         while (loops != 0) {
657                 udelay(10);
658                 frame_val = tr32(MAC_MI_COM);
659
660                 if ((frame_val & MI_COM_BUSY) == 0) {
661                         udelay(5);
662                         frame_val = tr32(MAC_MI_COM);
663                         break;
664                 }
665                 loops -= 1;
666         }
667
668         ret = -EBUSY;
669         if (loops != 0) {
670                 *val = frame_val & MI_COM_DATA_MASK;
671                 ret = 0;
672         }
673
674         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
675                 tw32_f(MAC_MI_MODE, tp->mi_mode);
676                 udelay(80);
677         }
678
679         return ret;
680 }
681
682 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
683 {
684         u32 frame_val;
685         unsigned int loops;
686         int ret;
687
688         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
689                 tw32_f(MAC_MI_MODE,
690                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
691                 udelay(80);
692         }
693
694         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
695                       MI_COM_PHY_ADDR_MASK);
696         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
697                       MI_COM_REG_ADDR_MASK);
698         frame_val |= (val & MI_COM_DATA_MASK);
699         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
700         
701         tw32_f(MAC_MI_COM, frame_val);
702
703         loops = PHY_BUSY_LOOPS;
704         while (loops != 0) {
705                 udelay(10);
706                 frame_val = tr32(MAC_MI_COM);
707                 if ((frame_val & MI_COM_BUSY) == 0) {
708                         udelay(5);
709                         frame_val = tr32(MAC_MI_COM);
710                         break;
711                 }
712                 loops -= 1;
713         }
714
715         ret = -EBUSY;
716         if (loops != 0)
717                 ret = 0;
718
719         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
720                 tw32_f(MAC_MI_MODE, tp->mi_mode);
721                 udelay(80);
722         }
723
724         return ret;
725 }
726
727 static void tg3_phy_set_wirespeed(struct tg3 *tp)
728 {
729         u32 val;
730
731         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
732                 return;
733
734         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
735             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
736                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
737                              (val | (1 << 15) | (1 << 4)));
738 }
739
740 static int tg3_bmcr_reset(struct tg3 *tp)
741 {
742         u32 phy_control;
743         int limit, err;
744
745         /* OK, reset it, and poll the BMCR_RESET bit until it
746          * clears or we time out.
747          */
748         phy_control = BMCR_RESET;
749         err = tg3_writephy(tp, MII_BMCR, phy_control);
750         if (err != 0)
751                 return -EBUSY;
752
753         limit = 5000;
754         while (limit--) {
755                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
756                 if (err != 0)
757                         return -EBUSY;
758
759                 if ((phy_control & BMCR_RESET) == 0) {
760                         udelay(40);
761                         break;
762                 }
763                 udelay(10);
764         }
765         if (limit <= 0)
766                 return -EBUSY;
767
768         return 0;
769 }
770
771 static int tg3_wait_macro_done(struct tg3 *tp)
772 {
773         int limit = 100;
774
775         while (limit--) {
776                 u32 tmp32;
777
778                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
779                         if ((tmp32 & 0x1000) == 0)
780                                 break;
781                 }
782         }
783         if (limit <= 0)
784                 return -EBUSY;
785
786         return 0;
787 }
788
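/* DSP test-pattern check: for each of the four channels the routine below
 * writes a known pattern through MII_TG3_DSP_ADDRESS/MII_TG3_DSP_RW_PORT,
 * reads it back, and returns -EBUSY (requesting another PHY reset via
 * *resetp) when the read-back does not match.
 */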
789 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
790 {
791         static const u32 test_pat[4][6] = {
792         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
793         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
794         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
795         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
796         };
797         int chan;
798
799         for (chan = 0; chan < 4; chan++) {
800                 int i;
801
802                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
803                              (chan * 0x2000) | 0x0200);
804                 tg3_writephy(tp, 0x16, 0x0002);
805
806                 for (i = 0; i < 6; i++)
807                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
808                                      test_pat[chan][i]);
809
810                 tg3_writephy(tp, 0x16, 0x0202);
811                 if (tg3_wait_macro_done(tp)) {
812                         *resetp = 1;
813                         return -EBUSY;
814                 }
815
816                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
817                              (chan * 0x2000) | 0x0200);
818                 tg3_writephy(tp, 0x16, 0x0082);
819                 if (tg3_wait_macro_done(tp)) {
820                         *resetp = 1;
821                         return -EBUSY;
822                 }
823
824                 tg3_writephy(tp, 0x16, 0x0802);
825                 if (tg3_wait_macro_done(tp)) {
826                         *resetp = 1;
827                         return -EBUSY;
828                 }
829
830                 for (i = 0; i < 6; i += 2) {
831                         u32 low, high;
832
833                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
834                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
835                             tg3_wait_macro_done(tp)) {
836                                 *resetp = 1;
837                                 return -EBUSY;
838                         }
839                         low &= 0x7fff;
840                         high &= 0x000f;
841                         if (low != test_pat[chan][i] ||
842                             high != test_pat[chan][i+1]) {
843                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
844                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
845                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
846
847                                 return -EBUSY;
848                         }
849                 }
850         }
851
852         return 0;
853 }
854
855 static int tg3_phy_reset_chanpat(struct tg3 *tp)
856 {
857         int chan;
858
859         for (chan = 0; chan < 4; chan++) {
860                 int i;
861
862                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
863                              (chan * 0x2000) | 0x0200);
864                 tg3_writephy(tp, 0x16, 0x0002);
865                 for (i = 0; i < 6; i++)
866                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
867                 tg3_writephy(tp, 0x16, 0x0202);
868                 if (tg3_wait_macro_done(tp))
869                         return -EBUSY;
870         }
871
872         return 0;
873 }
874
875 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
876 {
877         u32 reg32, phy9_orig;
878         int retries, do_phy_reset, err;
879
880         retries = 10;
881         do_phy_reset = 1;
882         do {
883                 if (do_phy_reset) {
884                         err = tg3_bmcr_reset(tp);
885                         if (err)
886                                 return err;
887                         do_phy_reset = 0;
888                 }
889
890                 /* Disable transmitter and interrupt.  */
891                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
892                         continue;
893
894                 reg32 |= 0x3000;
895                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
896
897                 /* Set full-duplex, 1000 mbps.  */
898                 tg3_writephy(tp, MII_BMCR,
899                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
900
901                 /* Set to master mode.  */
902                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
903                         continue;
904
905                 tg3_writephy(tp, MII_TG3_CTRL,
906                              (MII_TG3_CTRL_AS_MASTER |
907                               MII_TG3_CTRL_ENABLE_AS_MASTER));
908
909                 /* Enable SM_DSP_CLOCK and 6dB.  */
910                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
911
912                 /* Block the PHY control access.  */
913                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
914                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
915
916                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
917                 if (!err)
918                         break;
919         } while (--retries);
920
921         err = tg3_phy_reset_chanpat(tp);
922         if (err)
923                 return err;
924
925         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
926         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
927
928         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
929         tg3_writephy(tp, 0x16, 0x0000);
930
931         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
932             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
933                 /* Set Extended packet length bit for jumbo frames */
934                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
935         }
936         else {
937                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
938         }
939
940         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
941
942         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
943                 reg32 &= ~0x3000;
944                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
945         } else if (!err)
946                 err = -EBUSY;
947
948         return err;
949 }
950
951 /* This will reset the tigon3 PHY and then apply any chip-specific
952  * workarounds that must follow a PHY reset.
953  */
954 static int tg3_phy_reset(struct tg3 *tp)
955 {
956         u32 phy_status;
957         int err;
958
959         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
960         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
961         if (err != 0)
962                 return -EBUSY;
963
964         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
965             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
966             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
967                 err = tg3_phy_reset_5703_4_5(tp);
968                 if (err)
969                         return err;
970                 goto out;
971         }
972
973         err = tg3_bmcr_reset(tp);
974         if (err)
975                 return err;
976
977 out:
978         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
979                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
980                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
981                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
982                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
983                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
984                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
985         }
986         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
987                 tg3_writephy(tp, 0x1c, 0x8d68);
988                 tg3_writephy(tp, 0x1c, 0x8d68);
989         }
990         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
991                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
992                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
993                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
994                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
995                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
996                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
997                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
998                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
999         }
1000         /* Set Extended packet length bit (bit 14) on all chips that */
1001         /* support jumbo frames */
1002         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1003                 /* Cannot do read-modify-write on 5401 */
1004                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1005         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1006                 u32 phy_reg;
1007
1008                 /* Set bit 14 with read-modify-write to preserve other bits */
1009                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1010                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1011                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1012         }
1013
1014         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1015          * jumbo frame transmission.
1016          */
1017         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1018                 u32 phy_reg;
1019
1020                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1021                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1022                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1023         }
1024
1025         tg3_phy_set_wirespeed(tp);
1026         return 0;
1027 }
1028
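/* Aux power GPIOs: when this port or its peer (the other function on
 * dual-port 5704/5714 boards) needs WOL or ASF, the GRC_LOCAL_CTRL GPIO
 * output-enable/output bits are sequenced below to keep auxiliary power
 * available; otherwise the GPIO1 bits are toggled instead.
 */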
1029 static void tg3_frob_aux_power(struct tg3 *tp)
1030 {
1031         struct tg3 *tp_peer = tp;
1032
1033         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1034                 return;
1035
1036         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1037             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1038                 struct net_device *dev_peer;
1039
1040                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1041                 if (!dev_peer)
1042                         BUG();
1043                 tp_peer = netdev_priv(dev_peer);
1044         }
1045
1046         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1047             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1048             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1049             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1050                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1051                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1052                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1053                                     (GRC_LCLCTRL_GPIO_OE0 |
1054                                      GRC_LCLCTRL_GPIO_OE1 |
1055                                      GRC_LCLCTRL_GPIO_OE2 |
1056                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1057                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1058                                     100);
1059                 } else {
1060                         u32 no_gpio2;
1061                         u32 grc_local_ctrl = 0;
1062
1063                         if (tp_peer != tp &&
1064                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1065                                 return;
1066
1067                         /* Workaround to prevent overdrawing Amps. */
1068                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1069                             ASIC_REV_5714) {
1070                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1071                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1072                                             grc_local_ctrl, 100);
1073                         }
1074
1075                         /* On 5753 and variants, GPIO2 cannot be used. */
1076                         no_gpio2 = tp->nic_sram_data_cfg &
1077                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1078
1079                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1080                                          GRC_LCLCTRL_GPIO_OE1 |
1081                                          GRC_LCLCTRL_GPIO_OE2 |
1082                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1083                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1084                         if (no_gpio2) {
1085                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1086                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1087                         }
1088                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1089                                                     grc_local_ctrl, 100);
1090
1091                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1092
1093                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1094                                                     grc_local_ctrl, 100);
1095
1096                         if (!no_gpio2) {
1097                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1098                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1099                                             grc_local_ctrl, 100);
1100                         }
1101                 }
1102         } else {
1103                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1104                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1105                         if (tp_peer != tp &&
1106                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1107                                 return;
1108
1109                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1110                                     (GRC_LCLCTRL_GPIO_OE1 |
1111                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1112
1113                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1114                                     GRC_LCLCTRL_GPIO_OE1, 100);
1115
1116                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1117                                     (GRC_LCLCTRL_GPIO_OE1 |
1118                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1119                 }
1120         }
1121 }
1122
1123 static int tg3_setup_phy(struct tg3 *, int);
1124
1125 #define RESET_KIND_SHUTDOWN     0
1126 #define RESET_KIND_INIT         1
1127 #define RESET_KIND_SUSPEND      2
1128
1129 static void tg3_write_sig_post_reset(struct tg3 *, int);
1130 static int tg3_halt_cpu(struct tg3 *, u32);
1131 static int tg3_nvram_lock(struct tg3 *);
1132 static void tg3_nvram_unlock(struct tg3 *);
1133
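/* Power management: the 'state' argument below selects the PCI power state
 * (0 = D0 ... 3 = D3) programmed into the PCI_PM_CTRL register.  On the way
 * down the routine also reconfigures the MAC for Wake-on-LAN, gates unused
 * clocks and hands GPIO/aux-power control over via tg3_frob_aux_power().
 */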
1134 static int tg3_set_power_state(struct tg3 *tp, int state)
1135 {
1136         u32 misc_host_ctrl;
1137         u16 power_control, power_caps;
1138         int pm = tp->pm_cap;
1139
1140         /* Make sure register accesses (indirect or otherwise)
1141          * will function correctly.
1142          */
1143         pci_write_config_dword(tp->pdev,
1144                                TG3PCI_MISC_HOST_CTRL,
1145                                tp->misc_host_ctrl);
1146
1147         pci_read_config_word(tp->pdev,
1148                              pm + PCI_PM_CTRL,
1149                              &power_control);
1150         power_control |= PCI_PM_CTRL_PME_STATUS;
1151         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1152         switch (state) {
1153         case 0:
1154                 power_control |= 0;
1155                 pci_write_config_word(tp->pdev,
1156                                       pm + PCI_PM_CTRL,
1157                                       power_control);
1158                 udelay(100);    /* Delay after power state change */
1159
1160                 /* Switch out of Vaux if it is not a LOM */
1161                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1162                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1163
1164                 return 0;
1165
1166         case 1:
1167                 power_control |= 1;
1168                 break;
1169
1170         case 2:
1171                 power_control |= 2;
1172                 break;
1173
1174         case 3:
1175                 power_control |= 3;
1176                 break;
1177
1178         default:
1179                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1180                        "requested.\n",
1181                        tp->dev->name, state);
1182                 return -EINVAL;
1183         }
1184
1185         power_control |= PCI_PM_CTRL_PME_ENABLE;
1186
1187         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1188         tw32(TG3PCI_MISC_HOST_CTRL,
1189              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1190
1191         if (tp->link_config.phy_is_low_power == 0) {
1192                 tp->link_config.phy_is_low_power = 1;
1193                 tp->link_config.orig_speed = tp->link_config.speed;
1194                 tp->link_config.orig_duplex = tp->link_config.duplex;
1195                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1196         }
1197
1198         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1199                 tp->link_config.speed = SPEED_10;
1200                 tp->link_config.duplex = DUPLEX_HALF;
1201                 tp->link_config.autoneg = AUTONEG_ENABLE;
1202                 tg3_setup_phy(tp, 0);
1203         }
1204
1205         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1206                 int i;
1207                 u32 val;
1208
1209                 for (i = 0; i < 200; i++) {
1210                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1211                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1212                                 break;
1213                         msleep(1);
1214                 }
1215         }
1216         tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1217                                              WOL_DRV_STATE_SHUTDOWN |
1218                                              WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1219
1220         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1221
1222         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1223                 u32 mac_mode;
1224
1225                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1226                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1227                         udelay(40);
1228
1229                         mac_mode = MAC_MODE_PORT_MODE_MII;
1230
1231                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1232                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1233                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1234                 } else {
1235                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1236                 }
1237
1238                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1239                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1240
1241                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1242                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1243                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1244
1245                 tw32_f(MAC_MODE, mac_mode);
1246                 udelay(100);
1247
1248                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1249                 udelay(10);
1250         }
1251
1252         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1253             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1254              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1255                 u32 base_val;
1256
1257                 base_val = tp->pci_clock_ctrl;
1258                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1259                              CLOCK_CTRL_TXCLK_DISABLE);
1260
1261                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1262                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1263         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1264                 /* do nothing */
1265         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1266                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1267                 u32 newbits1, newbits2;
1268
1269                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1270                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1271                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1272                                     CLOCK_CTRL_TXCLK_DISABLE |
1273                                     CLOCK_CTRL_ALTCLK);
1274                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1275                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1276                         newbits1 = CLOCK_CTRL_625_CORE;
1277                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1278                 } else {
1279                         newbits1 = CLOCK_CTRL_ALTCLK;
1280                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1281                 }
1282
1283                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1284                             40);
1285
1286                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1287                             40);
1288
1289                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1290                         u32 newbits3;
1291
1292                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1293                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1294                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1295                                             CLOCK_CTRL_TXCLK_DISABLE |
1296                                             CLOCK_CTRL_44MHZ_CORE);
1297                         } else {
1298                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1299                         }
1300
1301                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1302                                     tp->pci_clock_ctrl | newbits3, 40);
1303                 }
1304         }
1305
1306         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1307             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1308                 /* Turn off the PHY */
1309                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1310                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1311                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1312                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1313                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
1314                                 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1315                 }
1316         }
1317
1318         tg3_frob_aux_power(tp);
1319
1320         /* Workaround for unstable PLL clock */
1321         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1322             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1323                 u32 val = tr32(0x7d00);
1324
1325                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1326                 tw32(0x7d00, val);
1327                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1328                         int err;
1329
1330                         err = tg3_nvram_lock(tp);
1331                         tg3_halt_cpu(tp, RX_CPU_BASE);
1332                         if (!err)
1333                                 tg3_nvram_unlock(tp);
1334                 }
1335         }
1336
1337         /* Finally, set the new power state. */
1338         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1339         udelay(100);    /* Delay after power state change */
1340
1341         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1342
1343         return 0;
1344 }
1345
1346 static void tg3_link_report(struct tg3 *tp)
1347 {
1348         if (!netif_carrier_ok(tp->dev)) {
1349                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1350         } else {
1351                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1352                        tp->dev->name,
1353                        (tp->link_config.active_speed == SPEED_1000 ?
1354                         1000 :
1355                         (tp->link_config.active_speed == SPEED_100 ?
1356                          100 : 10)),
1357                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1358                         "full" : "half"));
1359
1360                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1361                        "%s for RX.\n",
1362                        tp->dev->name,
1363                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1364                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1365         }
1366 }
1367
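/* Flow-control resolution (a summary of the rules coded below, assuming
 * standard 802.3 pause autonegotiation semantics):
 *
 *     local PAUSE  local ASYM   remote PAUSE  remote ASYM   =>  result
 *          1           x             1             x            RX + TX pause
 *          1           1             0             1            RX pause only
 *          0           1             1             1            TX pause only
 *
 * When pause autonegotiation is disabled the previously configured flags
 * are kept as-is.
 */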
1368 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1369 {
1370         u32 new_tg3_flags = 0;
1371         u32 old_rx_mode = tp->rx_mode;
1372         u32 old_tx_mode = tp->tx_mode;
1373
1374         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1375
1376                 /* Convert 1000BaseX flow control bits to 1000BaseT
1377                  * bits before resolving flow control.
1378                  */
1379                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1380                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1381                                        ADVERTISE_PAUSE_ASYM);
1382                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1383
1384                         if (local_adv & ADVERTISE_1000XPAUSE)
1385                                 local_adv |= ADVERTISE_PAUSE_CAP;
1386                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1387                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1388                         if (remote_adv & LPA_1000XPAUSE)
1389                                 remote_adv |= LPA_PAUSE_CAP;
1390                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1391                                 remote_adv |= LPA_PAUSE_ASYM;
1392                 }
1393
1394                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1395                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1396                                 if (remote_adv & LPA_PAUSE_CAP)
1397                                         new_tg3_flags |=
1398                                                 (TG3_FLAG_RX_PAUSE |
1399                                                 TG3_FLAG_TX_PAUSE);
1400                                 else if (remote_adv & LPA_PAUSE_ASYM)
1401                                         new_tg3_flags |=
1402                                                 (TG3_FLAG_RX_PAUSE);
1403                         } else {
1404                                 if (remote_adv & LPA_PAUSE_CAP)
1405                                         new_tg3_flags |=
1406                                                 (TG3_FLAG_RX_PAUSE |
1407                                                 TG3_FLAG_TX_PAUSE);
1408                         }
1409                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1410                         if ((remote_adv & LPA_PAUSE_CAP) &&
1411                         (remote_adv & LPA_PAUSE_ASYM))
1412                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1413                 }
1414
1415                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1416                 tp->tg3_flags |= new_tg3_flags;
1417         } else {
1418                 new_tg3_flags = tp->tg3_flags;
1419         }
1420
1421         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1422                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1423         else
1424                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1425
1426         if (old_rx_mode != tp->rx_mode) {
1427                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1428         }
1429
1430         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1431                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1432         else
1433                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1434
1435         if (old_tx_mode != tp->tx_mode) {
1436                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1437         }
1438 }
1439
1440 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1441 {
1442         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1443         case MII_TG3_AUX_STAT_10HALF:
1444                 *speed = SPEED_10;
1445                 *duplex = DUPLEX_HALF;
1446                 break;
1447
1448         case MII_TG3_AUX_STAT_10FULL:
1449                 *speed = SPEED_10;
1450                 *duplex = DUPLEX_FULL;
1451                 break;
1452
1453         case MII_TG3_AUX_STAT_100HALF:
1454                 *speed = SPEED_100;
1455                 *duplex = DUPLEX_HALF;
1456                 break;
1457
1458         case MII_TG3_AUX_STAT_100FULL:
1459                 *speed = SPEED_100;
1460                 *duplex = DUPLEX_FULL;
1461                 break;
1462
1463         case MII_TG3_AUX_STAT_1000HALF:
1464                 *speed = SPEED_1000;
1465                 *duplex = DUPLEX_HALF;
1466                 break;
1467
1468         case MII_TG3_AUX_STAT_1000FULL:
1469                 *speed = SPEED_1000;
1470                 *duplex = DUPLEX_FULL;
1471                 break;
1472
1473         default:
1474                 *speed = SPEED_INVALID;
1475                 *duplex = DUPLEX_INVALID;
1476                 break;
1477         }
1478 }
1479
1480 static void tg3_phy_copper_begin(struct tg3 *tp)
1481 {
1482         u32 new_adv;
1483         int i;
1484
1485         if (tp->link_config.phy_is_low_power) {
1486                 /* Entering low power mode.  Disable gigabit and
1487                  * 100baseT advertisements.
1488                  */
1489                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1490
1491                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1492                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1493                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1494                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1495
1496                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1497         } else if (tp->link_config.speed == SPEED_INVALID) {
1498                 tp->link_config.advertising =
1499                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1500                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1501                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1502                          ADVERTISED_Autoneg | ADVERTISED_MII);
1503
1504                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1505                         tp->link_config.advertising &=
1506                                 ~(ADVERTISED_1000baseT_Half |
1507                                   ADVERTISED_1000baseT_Full);
1508
1509                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1510                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1511                         new_adv |= ADVERTISE_10HALF;
1512                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1513                         new_adv |= ADVERTISE_10FULL;
1514                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1515                         new_adv |= ADVERTISE_100HALF;
1516                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1517                         new_adv |= ADVERTISE_100FULL;
1518                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1519
1520                 if (tp->link_config.advertising &
1521                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1522                         new_adv = 0;
1523                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1524                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1525                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1526                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1527                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1528                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1529                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1530                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1531                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1532                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1533                 } else {
1534                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1535                 }
1536         } else {
1537                 /* Asking for a specific link mode. */
1538                 if (tp->link_config.speed == SPEED_1000) {
1539                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1540                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1541
1542                         if (tp->link_config.duplex == DUPLEX_FULL)
1543                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1544                         else
1545                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1546                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1547                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1548                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1549                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1550                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1551                 } else {
1552                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1553
1554                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1555                         if (tp->link_config.speed == SPEED_100) {
1556                                 if (tp->link_config.duplex == DUPLEX_FULL)
1557                                         new_adv |= ADVERTISE_100FULL;
1558                                 else
1559                                         new_adv |= ADVERTISE_100HALF;
1560                         } else {
1561                                 if (tp->link_config.duplex == DUPLEX_FULL)
1562                                         new_adv |= ADVERTISE_10FULL;
1563                                 else
1564                                         new_adv |= ADVERTISE_10HALF;
1565                         }
1566                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1567                 }
1568         }
1569
1570         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1571             tp->link_config.speed != SPEED_INVALID) {
1572                 u32 bmcr, orig_bmcr;
1573
1574                 tp->link_config.active_speed = tp->link_config.speed;
1575                 tp->link_config.active_duplex = tp->link_config.duplex;
1576
1577                 bmcr = 0;
1578                 switch (tp->link_config.speed) {
1579                 default:
1580                 case SPEED_10:
1581                         break;
1582
1583                 case SPEED_100:
1584                         bmcr |= BMCR_SPEED100;
1585                         break;
1586
1587                 case SPEED_1000:
1588                         bmcr |= TG3_BMCR_SPEED1000;
1589                         break;
1590                 }
1591
1592                 if (tp->link_config.duplex == DUPLEX_FULL)
1593                         bmcr |= BMCR_FULLDPLX;
1594
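                     /* Knock the link down via loopback and wait for it to
                      * drop before writing the new forced BMCR value.
                      */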
1595                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1596                     (bmcr != orig_bmcr)) {
1597                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1598                         for (i = 0; i < 1500; i++) {
1599                                 u32 tmp;
1600
1601                                 udelay(10);
1602                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1603                                     tg3_readphy(tp, MII_BMSR, &tmp))
1604                                         continue;
1605                                 if (!(tmp & BMSR_LSTATUS)) {
1606                                         udelay(40);
1607                                         break;
1608                                 }
1609                         }
1610                         tg3_writephy(tp, MII_BMCR, bmcr);
1611                         udelay(40);
1612                 }
1613         } else {
1614                 tg3_writephy(tp, MII_BMCR,
1615                              BMCR_ANENABLE | BMCR_ANRESTART);
1616         }
1617 }
1618
1619 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1620 {
1621         int err;
1622
1623         /* Turn off tap power management. */
1624         /* Set Extended packet length bit */
1625         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1626
1627         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1628         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1629
1630         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1631         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1632
1633         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1634         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1635
1636         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1637         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1638
1639         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1640         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1641
1642         udelay(40);
1643
1644         return err;
1645 }
1646
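     /* Return 1 if the PHY is advertising every 10/100 mode (and, unless
      * the device is 10/100-only, every gigabit mode as well).  Used to
      * decide whether autoneg must be restarted after low power mode.
      */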
1647 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1648 {
1649         u32 adv_reg, all_mask;
1650
1651         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1652                 return 0;
1653
1654         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1655                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1656         if ((adv_reg & all_mask) != all_mask)
1657                 return 0;
1658         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1659                 u32 tg3_ctrl;
1660
1661                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1662                         return 0;
1663
1664                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1665                             MII_TG3_CTRL_ADV_1000_FULL);
1666                 if ((tg3_ctrl & all_mask) != all_mask)
1667                         return 0;
1668         }
1669         return 1;
1670 }
1671
1672 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1673 {
1674         int current_link_up;
1675         u32 bmsr, dummy;
1676         u16 current_speed;
1677         u8 current_duplex;
1678         int i, err;
1679
1680         tw32(MAC_EVENT, 0);
1681
1682         tw32_f(MAC_STATUS,
1683              (MAC_STATUS_SYNC_CHANGED |
1684               MAC_STATUS_CFG_CHANGED |
1685               MAC_STATUS_MI_COMPLETION |
1686               MAC_STATUS_LNKSTATE_CHANGED));
1687         udelay(40);
1688
1689         tp->mi_mode = MAC_MI_MODE_BASE;
1690         tw32_f(MAC_MI_MODE, tp->mi_mode);
1691         udelay(80);
1692
1693         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1694
1695         /* Some third-party PHYs need to be reset on link going
1696          * down.
1697          */
1698         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1699              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1700              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1701             netif_carrier_ok(tp->dev)) {
1702                 tg3_readphy(tp, MII_BMSR, &bmsr);
1703                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1704                     !(bmsr & BMSR_LSTATUS))
1705                         force_reset = 1;
1706         }
1707         if (force_reset)
1708                 tg3_phy_reset(tp);
1709
1710         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1711                 tg3_readphy(tp, MII_BMSR, &bmsr);
1712                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1713                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1714                         bmsr = 0;
1715
1716                 if (!(bmsr & BMSR_LSTATUS)) {
1717                         err = tg3_init_5401phy_dsp(tp);
1718                         if (err)
1719                                 return err;
1720
1721                         tg3_readphy(tp, MII_BMSR, &bmsr);
1722                         for (i = 0; i < 1000; i++) {
1723                                 udelay(10);
1724                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1725                                     (bmsr & BMSR_LSTATUS)) {
1726                                         udelay(40);
1727                                         break;
1728                                 }
1729                         }
1730
1731                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1732                             !(bmsr & BMSR_LSTATUS) &&
1733                             tp->link_config.active_speed == SPEED_1000) {
1734                                 err = tg3_phy_reset(tp);
1735                                 if (!err)
1736                                         err = tg3_init_5401phy_dsp(tp);
1737                                 if (err)
1738                                         return err;
1739                         }
1740                 }
1741         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1742                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1743                 /* 5701 {A0,B0} CRC bug workaround */
1744                 tg3_writephy(tp, 0x15, 0x0a75);
1745                 tg3_writephy(tp, 0x1c, 0x8c68);
1746                 tg3_writephy(tp, 0x1c, 0x8d68);
1747                 tg3_writephy(tp, 0x1c, 0x8c68);
1748         }
1749
1750         /* Clear pending interrupts... */
1751         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1752         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1753
1754         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1755                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1756         else
1757                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1758
1759         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1760             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1761                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1762                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1763                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1764                 else
1765                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1766         }
1767
1768         current_link_up = 0;
1769         current_speed = SPEED_INVALID;
1770         current_duplex = DUPLEX_INVALID;
1771
1772         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1773                 u32 val;
1774
1775                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1776                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1777                 if (!(val & (1 << 10))) {
1778                         val |= (1 << 10);
1779                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1780                         goto relink;
1781                 }
1782         }
1783
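             /* BMSR latches link-down events, so read it twice per pass
              * to get the current link state.
              */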
1784         bmsr = 0;
1785         for (i = 0; i < 100; i++) {
1786                 tg3_readphy(tp, MII_BMSR, &bmsr);
1787                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1788                     (bmsr & BMSR_LSTATUS))
1789                         break;
1790                 udelay(40);
1791         }
1792
1793         if (bmsr & BMSR_LSTATUS) {
1794                 u32 aux_stat, bmcr;
1795
1796                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1797                 for (i = 0; i < 2000; i++) {
1798                         udelay(10);
1799                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1800                             aux_stat)
1801                                 break;
1802                 }
1803
1804                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1805                                              &current_speed,
1806                                              &current_duplex);
1807
1808                 bmcr = 0;
1809                 for (i = 0; i < 200; i++) {
1810                         tg3_readphy(tp, MII_BMCR, &bmcr);
1811                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1812                                 continue;
1813                         if (bmcr && bmcr != 0x7fff)
1814                                 break;
1815                         udelay(10);
1816                 }
1817
1818                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1819                         if (bmcr & BMCR_ANENABLE) {
1820                                 current_link_up = 1;
1821
1822                                 /* Force autoneg restart if we are exiting
1823                                  * low power mode.
1824                                  */
1825                                 if (!tg3_copper_is_advertising_all(tp))
1826                                         current_link_up = 0;
1827                         } else {
1828                                 current_link_up = 0;
1829                         }
1830                 } else {
1831                         if (!(bmcr & BMCR_ANENABLE) &&
1832                             tp->link_config.speed == current_speed &&
1833                             tp->link_config.duplex == current_duplex) {
1834                                 current_link_up = 1;
1835                         } else {
1836                                 current_link_up = 0;
1837                         }
1838                 }
1839
1840                 tp->link_config.active_speed = current_speed;
1841                 tp->link_config.active_duplex = current_duplex;
1842         }
1843
1844         if (current_link_up == 1 &&
1845             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1846             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1847                 u32 local_adv, remote_adv;
1848
1849                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1850                         local_adv = 0;
1851                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1852
1853                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1854                         remote_adv = 0;
1855
1856                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1857
1858                 /* If we are not advertising full pause capability,
1859                  * something is wrong.  Bring the link down and reconfigure.
1860                  */
1861                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1862                         current_link_up = 0;
1863                 } else {
1864                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1865                 }
1866         }
1867 relink:
1868         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1869                 u32 tmp;
1870
1871                 tg3_phy_copper_begin(tp);
1872
1873                 tg3_readphy(tp, MII_BMSR, &tmp);
1874                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1875                     (tmp & BMSR_LSTATUS))
1876                         current_link_up = 1;
1877         }
1878
1879         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1880         if (current_link_up == 1) {
1881                 if (tp->link_config.active_speed == SPEED_100 ||
1882                     tp->link_config.active_speed == SPEED_10)
1883                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1884                 else
1885                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1886         } else
1887                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1888
1889         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1890         if (tp->link_config.active_duplex == DUPLEX_HALF)
1891                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1892
1893         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1894         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1895                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1896                     (current_link_up == 1 &&
1897                      tp->link_config.active_speed == SPEED_10))
1898                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1899         } else {
1900                 if (current_link_up == 1)
1901                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1902         }
1903
1904         /* ??? Without this setting Netgear GA302T PHY does not
1905          * ??? send/receive packets...
1906          */
1907         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1908             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1909                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1910                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1911                 udelay(80);
1912         }
1913
1914         tw32_f(MAC_MODE, tp->mac_mode);
1915         udelay(40);
1916
1917         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1918                 /* Polled via timer. */
1919                 tw32_f(MAC_EVENT, 0);
1920         } else {
1921                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1922         }
1923         udelay(40);
1924
1925         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1926             current_link_up == 1 &&
1927             tp->link_config.active_speed == SPEED_1000 &&
1928             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1929              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1930                 udelay(120);
1931                 tw32_f(MAC_STATUS,
1932                      (MAC_STATUS_SYNC_CHANGED |
1933                       MAC_STATUS_CFG_CHANGED));
1934                 udelay(40);
1935                 tg3_write_mem(tp,
1936                               NIC_SRAM_FIRMWARE_MBOX,
1937                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1938         }
1939
1940         if (current_link_up != netif_carrier_ok(tp->dev)) {
1941                 if (current_link_up)
1942                         netif_carrier_on(tp->dev);
1943                 else
1944                         netif_carrier_off(tp->dev);
1945                 tg3_link_report(tp);
1946         }
1947
1948         return 0;
1949 }
1950
1951 struct tg3_fiber_aneginfo {
1952         int state;
1953 #define ANEG_STATE_UNKNOWN              0
1954 #define ANEG_STATE_AN_ENABLE            1
1955 #define ANEG_STATE_RESTART_INIT         2
1956 #define ANEG_STATE_RESTART              3
1957 #define ANEG_STATE_DISABLE_LINK_OK      4
1958 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1959 #define ANEG_STATE_ABILITY_DETECT       6
1960 #define ANEG_STATE_ACK_DETECT_INIT      7
1961 #define ANEG_STATE_ACK_DETECT           8
1962 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1963 #define ANEG_STATE_COMPLETE_ACK         10
1964 #define ANEG_STATE_IDLE_DETECT_INIT     11
1965 #define ANEG_STATE_IDLE_DETECT          12
1966 #define ANEG_STATE_LINK_OK              13
1967 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1968 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1969
1970         u32 flags;
1971 #define MR_AN_ENABLE            0x00000001
1972 #define MR_RESTART_AN           0x00000002
1973 #define MR_AN_COMPLETE          0x00000004
1974 #define MR_PAGE_RX              0x00000008
1975 #define MR_NP_LOADED            0x00000010
1976 #define MR_TOGGLE_TX            0x00000020
1977 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1978 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1979 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1980 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1981 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1982 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1983 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1984 #define MR_TOGGLE_RX            0x00002000
1985 #define MR_NP_RX                0x00004000
1986
1987 #define MR_LINK_OK              0x80000000
1988
1989         unsigned long link_time, cur_time;
1990
1991         u32 ability_match_cfg;
1992         int ability_match_count;
1993
1994         char ability_match, idle_match, ack_match;
1995
1996         u32 txconfig, rxconfig;
1997 #define ANEG_CFG_NP             0x00000080
1998 #define ANEG_CFG_ACK            0x00000040
1999 #define ANEG_CFG_RF2            0x00000020
2000 #define ANEG_CFG_RF1            0x00000010
2001 #define ANEG_CFG_PS2            0x00000001
2002 #define ANEG_CFG_PS1            0x00008000
2003 #define ANEG_CFG_HD             0x00004000
2004 #define ANEG_CFG_FD             0x00002000
2005 #define ANEG_CFG_INVAL          0x00001f06
2006
2007 };
2008 #define ANEG_OK         0
2009 #define ANEG_DONE       1
2010 #define ANEG_TIMER_ENAB 2
2011 #define ANEG_FAILED     -1
2012
2013 #define ANEG_STATE_SETTLE_TIME  10000
2014
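     /* Software 1000BaseX (802.3z clause 37 style) auto-negotiation
      * arbitration state machine, stepped once per call by fiber_autoneg().
      */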
2015 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2016                                    struct tg3_fiber_aneginfo *ap)
2017 {
2018         unsigned long delta;
2019         u32 rx_cfg_reg;
2020         int ret;
2021
2022         if (ap->state == ANEG_STATE_UNKNOWN) {
2023                 ap->rxconfig = 0;
2024                 ap->link_time = 0;
2025                 ap->cur_time = 0;
2026                 ap->ability_match_cfg = 0;
2027                 ap->ability_match_count = 0;
2028                 ap->ability_match = 0;
2029                 ap->idle_match = 0;
2030                 ap->ack_match = 0;
2031         }
2032         ap->cur_time++;
2033
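             /* Sample the last received config word and update the
              * ability/ack/idle match state consumed by the arbitration
              * states below.
              */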
2034         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2035                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2036
2037                 if (rx_cfg_reg != ap->ability_match_cfg) {
2038                         ap->ability_match_cfg = rx_cfg_reg;
2039                         ap->ability_match = 0;
2040                         ap->ability_match_count = 0;
2041                 } else {
2042                         if (++ap->ability_match_count > 1) {
2043                                 ap->ability_match = 1;
2044                                 ap->ability_match_cfg = rx_cfg_reg;
2045                         }
2046                 }
2047                 if (rx_cfg_reg & ANEG_CFG_ACK)
2048                         ap->ack_match = 1;
2049                 else
2050                         ap->ack_match = 0;
2051
2052                 ap->idle_match = 0;
2053         } else {
2054                 ap->idle_match = 1;
2055                 ap->ability_match_cfg = 0;
2056                 ap->ability_match_count = 0;
2057                 ap->ability_match = 0;
2058                 ap->ack_match = 0;
2059
2060                 rx_cfg_reg = 0;
2061         }
2062
2063         ap->rxconfig = rx_cfg_reg;
2064         ret = ANEG_OK;
2065
2066         switch(ap->state) {
2067         case ANEG_STATE_UNKNOWN:
2068                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2069                         ap->state = ANEG_STATE_AN_ENABLE;
2070
2071                 /* fallthru */
2072         case ANEG_STATE_AN_ENABLE:
2073                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2074                 if (ap->flags & MR_AN_ENABLE) {
2075                         ap->link_time = 0;
2076                         ap->cur_time = 0;
2077                         ap->ability_match_cfg = 0;
2078                         ap->ability_match_count = 0;
2079                         ap->ability_match = 0;
2080                         ap->idle_match = 0;
2081                         ap->ack_match = 0;
2082
2083                         ap->state = ANEG_STATE_RESTART_INIT;
2084                 } else {
2085                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2086                 }
2087                 break;
2088
2089         case ANEG_STATE_RESTART_INIT:
2090                 ap->link_time = ap->cur_time;
2091                 ap->flags &= ~(MR_NP_LOADED);
2092                 ap->txconfig = 0;
2093                 tw32(MAC_TX_AUTO_NEG, 0);
2094                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2095                 tw32_f(MAC_MODE, tp->mac_mode);
2096                 udelay(40);
2097
2098                 ret = ANEG_TIMER_ENAB;
2099                 ap->state = ANEG_STATE_RESTART;
2100
2101                 /* fallthru */
2102         case ANEG_STATE_RESTART:
2103                 delta = ap->cur_time - ap->link_time;
2104                 if (delta > ANEG_STATE_SETTLE_TIME) {
2105                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2106                 } else {
2107                         ret = ANEG_TIMER_ENAB;
2108                 }
2109                 break;
2110
2111         case ANEG_STATE_DISABLE_LINK_OK:
2112                 ret = ANEG_DONE;
2113                 break;
2114
2115         case ANEG_STATE_ABILITY_DETECT_INIT:
2116                 ap->flags &= ~(MR_TOGGLE_TX);
2117                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2118                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2119                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2120                 tw32_f(MAC_MODE, tp->mac_mode);
2121                 udelay(40);
2122
2123                 ap->state = ANEG_STATE_ABILITY_DETECT;
2124                 break;
2125
2126         case ANEG_STATE_ABILITY_DETECT:
2127                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2128                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2129                 }
2130                 break;
2131
2132         case ANEG_STATE_ACK_DETECT_INIT:
2133                 ap->txconfig |= ANEG_CFG_ACK;
2134                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2135                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2136                 tw32_f(MAC_MODE, tp->mac_mode);
2137                 udelay(40);
2138
2139                 ap->state = ANEG_STATE_ACK_DETECT;
2140
2141                 /* fallthru */
2142         case ANEG_STATE_ACK_DETECT:
2143                 if (ap->ack_match != 0) {
2144                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2145                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2146                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2147                         } else {
2148                                 ap->state = ANEG_STATE_AN_ENABLE;
2149                         }
2150                 } else if (ap->ability_match != 0 &&
2151                            ap->rxconfig == 0) {
2152                         ap->state = ANEG_STATE_AN_ENABLE;
2153                 }
2154                 break;
2155
2156         case ANEG_STATE_COMPLETE_ACK_INIT:
2157                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2158                         ret = ANEG_FAILED;
2159                         break;
2160                 }
2161                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2162                                MR_LP_ADV_HALF_DUPLEX |
2163                                MR_LP_ADV_SYM_PAUSE |
2164                                MR_LP_ADV_ASYM_PAUSE |
2165                                MR_LP_ADV_REMOTE_FAULT1 |
2166                                MR_LP_ADV_REMOTE_FAULT2 |
2167                                MR_LP_ADV_NEXT_PAGE |
2168                                MR_TOGGLE_RX |
2169                                MR_NP_RX);
2170                 if (ap->rxconfig & ANEG_CFG_FD)
2171                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2172                 if (ap->rxconfig & ANEG_CFG_HD)
2173                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2174                 if (ap->rxconfig & ANEG_CFG_PS1)
2175                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2176                 if (ap->rxconfig & ANEG_CFG_PS2)
2177                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2178                 if (ap->rxconfig & ANEG_CFG_RF1)
2179                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2180                 if (ap->rxconfig & ANEG_CFG_RF2)
2181                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2182                 if (ap->rxconfig & ANEG_CFG_NP)
2183                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2184
2185                 ap->link_time = ap->cur_time;
2186
2187                 ap->flags ^= (MR_TOGGLE_TX);
2188                 if (ap->rxconfig & 0x0008)
2189                         ap->flags |= MR_TOGGLE_RX;
2190                 if (ap->rxconfig & ANEG_CFG_NP)
2191                         ap->flags |= MR_NP_RX;
2192                 ap->flags |= MR_PAGE_RX;
2193
2194                 ap->state = ANEG_STATE_COMPLETE_ACK;
2195                 ret = ANEG_TIMER_ENAB;
2196                 break;
2197
2198         case ANEG_STATE_COMPLETE_ACK:
2199                 if (ap->ability_match != 0 &&
2200                     ap->rxconfig == 0) {
2201                         ap->state = ANEG_STATE_AN_ENABLE;
2202                         break;
2203                 }
2204                 delta = ap->cur_time - ap->link_time;
2205                 if (delta > ANEG_STATE_SETTLE_TIME) {
2206                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2207                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2208                         } else {
2209                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2210                                     !(ap->flags & MR_NP_RX)) {
2211                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2212                                 } else {
2213                                         ret = ANEG_FAILED;
2214                                 }
2215                         }
2216                 }
2217                 break;
2218
2219         case ANEG_STATE_IDLE_DETECT_INIT:
2220                 ap->link_time = ap->cur_time;
2221                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2222                 tw32_f(MAC_MODE, tp->mac_mode);
2223                 udelay(40);
2224
2225                 ap->state = ANEG_STATE_IDLE_DETECT;
2226                 ret = ANEG_TIMER_ENAB;
2227                 break;
2228
2229         case ANEG_STATE_IDLE_DETECT:
2230                 if (ap->ability_match != 0 &&
2231                     ap->rxconfig == 0) {
2232                         ap->state = ANEG_STATE_AN_ENABLE;
2233                         break;
2234                 }
2235                 delta = ap->cur_time - ap->link_time;
2236                 if (delta > ANEG_STATE_SETTLE_TIME) {
2237                         /* XXX another gem from the Broadcom driver :( */
2238                         ap->state = ANEG_STATE_LINK_OK;
2239                 }
2240                 break;
2241
2242         case ANEG_STATE_LINK_OK:
2243                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2244                 ret = ANEG_DONE;
2245                 break;
2246
2247         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2248                 /* ??? unimplemented */
2249                 break;
2250
2251         case ANEG_STATE_NEXT_PAGE_WAIT:
2252                 /* ??? unimplemented */
2253                 break;
2254
2255         default:
2256                 ret = ANEG_FAILED;
2257                 break;
2258         }
2259
2260         return ret;
2261 }
2262
2263 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2264 {
2265         int res = 0;
2266         struct tg3_fiber_aneginfo aninfo;
2267         int status = ANEG_FAILED;
2268         unsigned int tick;
2269         u32 tmp;
2270
2271         tw32_f(MAC_TX_AUTO_NEG, 0);
2272
2273         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2274         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2275         udelay(40);
2276
2277         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2278         udelay(40);
2279
2280         memset(&aninfo, 0, sizeof(aninfo));
2281         aninfo.flags |= MR_AN_ENABLE;
2282         aninfo.state = ANEG_STATE_UNKNOWN;
2283         aninfo.cur_time = 0;
2284         tick = 0;
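             /* Step the arbitration state machine roughly once per
              * microsecond, for at most ~195 ms.
              */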
2285         while (++tick < 195000) {
2286                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2287                 if (status == ANEG_DONE || status == ANEG_FAILED)
2288                         break;
2289
2290                 udelay(1);
2291         }
2292
2293         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2294         tw32_f(MAC_MODE, tp->mac_mode);
2295         udelay(40);
2296
2297         *flags = aninfo.flags;
2298
2299         if (status == ANEG_DONE &&
2300             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2301                              MR_LP_ADV_FULL_DUPLEX)))
2302                 res = 1;
2303
2304         return res;
2305 }
2306
2307 static void tg3_init_bcm8002(struct tg3 *tp)
2308 {
2309         u32 mac_status = tr32(MAC_STATUS);
2310         int i;
2311
2312         /* Reset when initializing for the first time, or when we have a link. */
2313         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2314             !(mac_status & MAC_STATUS_PCS_SYNCED))
2315                 return;
2316
2317         /* Set PLL lock range. */
2318         tg3_writephy(tp, 0x16, 0x8007);
2319
2320         /* SW reset */
2321         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2322
2323         /* Wait for reset to complete. */
2324         /* XXX schedule_timeout() ... */
2325         for (i = 0; i < 500; i++)
2326                 udelay(10);
2327
2328         /* Config mode; select PMA/Ch 1 regs. */
2329         tg3_writephy(tp, 0x10, 0x8411);
2330
2331         /* Enable auto-lock and comdet, select txclk for tx. */
2332         tg3_writephy(tp, 0x11, 0x0a10);
2333
2334         tg3_writephy(tp, 0x18, 0x00a0);
2335         tg3_writephy(tp, 0x16, 0x41ff);
2336
2337         /* Assert and deassert POR. */
2338         tg3_writephy(tp, 0x13, 0x0400);
2339         udelay(40);
2340         tg3_writephy(tp, 0x13, 0x0000);
2341
2342         tg3_writephy(tp, 0x11, 0x0a50);
2343         udelay(40);
2344         tg3_writephy(tp, 0x11, 0x0a10);
2345
2346         /* Wait for signal to stabilize */
2347         /* XXX schedule_timeout() ... */
2348         for (i = 0; i < 15000; i++)
2349                 udelay(10);
2350
2351         /* Deselect the channel register so we can read the PHYID
2352          * later.
2353          */
2354         tg3_writephy(tp, 0x10, 0x8011);
2355 }
2356
2357 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2358 {
2359         u32 sg_dig_ctrl, sg_dig_status;
2360         u32 serdes_cfg, expected_sg_dig_ctrl;
2361         int workaround, port_a;
2362         int current_link_up;
2363
2364         serdes_cfg = 0;
2365         expected_sg_dig_ctrl = 0;
2366         workaround = 0;
2367         port_a = 1;
2368         current_link_up = 0;
2369
2370         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2371             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2372                 workaround = 1;
2373                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2374                         port_a = 0;
2375
2376                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2377                 /* preserve bits 20-23 for voltage regulator */
2378                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2379         }
2380
2381         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2382
2383         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2384                 if (sg_dig_ctrl & (1 << 31)) {
2385                         if (workaround) {
2386                                 u32 val = serdes_cfg;
2387
2388                                 if (port_a)
2389                                         val |= 0xc010000;
2390                                 else
2391                                         val |= 0x4010000;
2392                                 tw32_f(MAC_SERDES_CFG, val);
2393                         }
2394                         tw32_f(SG_DIG_CTRL, 0x01388400);
2395                 }
2396                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2397                         tg3_setup_flow_control(tp, 0, 0);
2398                         current_link_up = 1;
2399                 }
2400                 goto out;
2401         }
2402
2403         /* Want auto-negotiation.  */
2404         expected_sg_dig_ctrl = 0x81388400;
2405
2406         /* Pause capability */
2407         expected_sg_dig_ctrl |= (1 << 11);
2408
2409         /* Asymmetric pause */
2410         expected_sg_dig_ctrl |= (1 << 12);
2411
2412         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2413                 if (workaround)
2414                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2415                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2416                 udelay(5);
2417                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2418
2419                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2420         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2421                                  MAC_STATUS_SIGNAL_DET)) {
2422                 int i;
2423
2424                 /* Give time to negotiate (~200ms) */
2425                 for (i = 0; i < 40000; i++) {
2426                         sg_dig_status = tr32(SG_DIG_STATUS);
2427                         if (sg_dig_status & (0x3))
2428                                 break;
2429                         udelay(5);
2430                 }
2431                 mac_status = tr32(MAC_STATUS);
2432
2433                 if ((sg_dig_status & (1 << 1)) &&
2434                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2435                         u32 local_adv, remote_adv;
2436
2437                         local_adv = ADVERTISE_PAUSE_CAP;
2438                         remote_adv = 0;
2439                         if (sg_dig_status & (1 << 19))
2440                                 remote_adv |= LPA_PAUSE_CAP;
2441                         if (sg_dig_status & (1 << 20))
2442                                 remote_adv |= LPA_PAUSE_ASYM;
2443
2444                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2445                         current_link_up = 1;
2446                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2447                 } else if (!(sg_dig_status & (1 << 1))) {
2448                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2449                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2450                         else {
2451                                 if (workaround) {
2452                                         u32 val = serdes_cfg;
2453
2454                                         if (port_a)
2455                                                 val |= 0xc010000;
2456                                         else
2457                                                 val |= 0x4010000;
2458
2459                                         tw32_f(MAC_SERDES_CFG, val);
2460                                 }
2461
2462                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2463                                 udelay(40);
2464
2465                                 /* Link parallel detection - link is up
2466                                  * only if we have PCS_SYNC and not
2467                                  * receiving config code words. */
2468                                 mac_status = tr32(MAC_STATUS);
2469                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2470                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2471                                         tg3_setup_flow_control(tp, 0, 0);
2472                                         current_link_up = 1;
2473                                 }
2474                         }
2475                 }
2476         }
2477
2478 out:
2479         return current_link_up;
2480 }
2481
2482 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2483 {
2484         int current_link_up = 0;
2485
2486         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2487                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2488                 goto out;
2489         }
2490
2491         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2492                 u32 flags;
2493                 int i;
2494   
2495                 if (fiber_autoneg(tp, &flags)) {
2496                         u32 local_adv, remote_adv;
2497
2498                         local_adv = ADVERTISE_PAUSE_CAP;
2499                         remote_adv = 0;
2500                         if (flags & MR_LP_ADV_SYM_PAUSE)
2501                                 remote_adv |= LPA_PAUSE_CAP;
2502                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2503                                 remote_adv |= LPA_PAUSE_ASYM;
2504
2505                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2506
2507                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2508                         current_link_up = 1;
2509                 }
2510                 for (i = 0; i < 30; i++) {
2511                         udelay(20);
2512                         tw32_f(MAC_STATUS,
2513                                (MAC_STATUS_SYNC_CHANGED |
2514                                 MAC_STATUS_CFG_CHANGED));
2515                         udelay(40);
2516                         if ((tr32(MAC_STATUS) &
2517                              (MAC_STATUS_SYNC_CHANGED |
2518                               MAC_STATUS_CFG_CHANGED)) == 0)
2519                                 break;
2520                 }
2521
2522                 mac_status = tr32(MAC_STATUS);
2523                 if (current_link_up == 0 &&
2524                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2525                     !(mac_status & MAC_STATUS_RCVD_CFG))
2526                         current_link_up = 1;
2527         } else {
2528                 /* Forcing 1000FD link up. */
2529                 current_link_up = 1;
2530                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2531
2532                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2533                 udelay(40);
2534         }
2535
2536 out:
2537         return current_link_up;
2538 }
2539
2540 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2541 {
2542         u32 orig_pause_cfg;
2543         u16 orig_active_speed;
2544         u8 orig_active_duplex;
2545         u32 mac_status;
2546         int current_link_up;
2547         int i;
2548
2549         orig_pause_cfg =
2550                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2551                                   TG3_FLAG_TX_PAUSE));
2552         orig_active_speed = tp->link_config.active_speed;
2553         orig_active_duplex = tp->link_config.active_duplex;
2554
2555         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2556             netif_carrier_ok(tp->dev) &&
2557             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2558                 mac_status = tr32(MAC_STATUS);
2559                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2560                                MAC_STATUS_SIGNAL_DET |
2561                                MAC_STATUS_CFG_CHANGED |
2562                                MAC_STATUS_RCVD_CFG);
2563                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2564                                    MAC_STATUS_SIGNAL_DET)) {
2565                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2566                                             MAC_STATUS_CFG_CHANGED));
2567                         return 0;
2568                 }
2569         }
2570
2571         tw32_f(MAC_TX_AUTO_NEG, 0);
2572
2573         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2574         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2575         tw32_f(MAC_MODE, tp->mac_mode);
2576         udelay(40);
2577
2578         if (tp->phy_id == PHY_ID_BCM8002)
2579                 tg3_init_bcm8002(tp);
2580
2581         /* Enable the link change event even when polling the serdes. */
2582         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2583         udelay(40);
2584
2585         current_link_up = 0;
2586         mac_status = tr32(MAC_STATUS);
2587
2588         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2589                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2590         else
2591                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2592
2593         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2594         tw32_f(MAC_MODE, tp->mac_mode);
2595         udelay(40);
2596
2597         tp->hw_status->status =
2598                 (SD_STATUS_UPDATED |
2599                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2600
2601         for (i = 0; i < 100; i++) {
2602                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2603                                     MAC_STATUS_CFG_CHANGED));
2604                 udelay(5);
2605                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2606                                          MAC_STATUS_CFG_CHANGED)) == 0)
2607                         break;
2608         }
2609
2610         mac_status = tr32(MAC_STATUS);
2611         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2612                 current_link_up = 0;
2613                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2614                         tw32_f(MAC_MODE, (tp->mac_mode |
2615                                           MAC_MODE_SEND_CONFIGS));
2616                         udelay(1);
2617                         tw32_f(MAC_MODE, tp->mac_mode);
2618                 }
2619         }
2620
2621         if (current_link_up == 1) {
2622                 tp->link_config.active_speed = SPEED_1000;
2623                 tp->link_config.active_duplex = DUPLEX_FULL;
2624                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2625                                     LED_CTRL_LNKLED_OVERRIDE |
2626                                     LED_CTRL_1000MBPS_ON));
2627         } else {
2628                 tp->link_config.active_speed = SPEED_INVALID;
2629                 tp->link_config.active_duplex = DUPLEX_INVALID;
2630                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2631                                     LED_CTRL_LNKLED_OVERRIDE |
2632                                     LED_CTRL_TRAFFIC_OVERRIDE));
2633         }
2634
2635         if (current_link_up != netif_carrier_ok(tp->dev)) {
2636                 if (current_link_up)
2637                         netif_carrier_on(tp->dev);
2638                 else
2639                         netif_carrier_off(tp->dev);
2640                 tg3_link_report(tp);
2641         } else {
2642                 u32 now_pause_cfg =
2643                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2644                                          TG3_FLAG_TX_PAUSE);
2645                 if (orig_pause_cfg != now_pause_cfg ||
2646                     orig_active_speed != tp->link_config.active_speed ||
2647                     orig_active_duplex != tp->link_config.active_duplex)
2648                         tg3_link_report(tp);
2649         }
2650
2651         return 0;
2652 }
2653
2654 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2655 {
2656         int current_link_up, err = 0;
2657         u32 bmsr, bmcr;
2658         u16 current_speed;
2659         u8 current_duplex;
2660
2661         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2662         tw32_f(MAC_MODE, tp->mac_mode);
2663         udelay(40);
2664
2665         tw32(MAC_EVENT, 0);
2666
2667         tw32_f(MAC_STATUS,
2668              (MAC_STATUS_SYNC_CHANGED |
2669               MAC_STATUS_CFG_CHANGED |
2670               MAC_STATUS_MI_COMPLETION |
2671               MAC_STATUS_LNKSTATE_CHANGED));
2672         udelay(40);
2673
2674         if (force_reset)
2675                 tg3_phy_reset(tp);
2676
2677         current_link_up = 0;
2678         current_speed = SPEED_INVALID;
2679         current_duplex = DUPLEX_INVALID;
2680
2681         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2682         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2683
2684         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2685
2686         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2687             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2688                 /* do nothing, just check for link up at the end */
2689         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2690                 u32 adv, new_adv;
2691
2692                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2693                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2694                                   ADVERTISE_1000XPAUSE |
2695                                   ADVERTISE_1000XPSE_ASYM |
2696                                   ADVERTISE_SLCT);
2697
2698                 /* Always advertise symmetric PAUSE just like copper */
2699                 new_adv |= ADVERTISE_1000XPAUSE;
2700
2701                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2702                         new_adv |= ADVERTISE_1000XHALF;
2703                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2704                         new_adv |= ADVERTISE_1000XFULL;
2705
2706                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2707                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2708                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2709                         tg3_writephy(tp, MII_BMCR, bmcr);
2710
2711                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2712                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2713                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2714
2715                         return err;
2716                 }
2717         } else {
2718                 u32 new_bmcr;
2719
2720                 bmcr &= ~BMCR_SPEED1000;
2721                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2722
2723                 if (tp->link_config.duplex == DUPLEX_FULL)
2724                         new_bmcr |= BMCR_FULLDPLX;
2725
2726                 if (new_bmcr != bmcr) {
2727                         /* BMCR_SPEED1000 is a reserved bit that needs
2728                          * to be set on write.
2729                          */
2730                         new_bmcr |= BMCR_SPEED1000;
2731
2732                         /* Force a linkdown */
2733                         if (netif_carrier_ok(tp->dev)) {
2734                                 u32 adv;
2735
2736                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2737                                 adv &= ~(ADVERTISE_1000XFULL |
2738                                          ADVERTISE_1000XHALF |
2739                                          ADVERTISE_SLCT);
2740                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2741                                 tg3_writephy(tp, MII_BMCR, bmcr |
2742                                                            BMCR_ANRESTART |
2743                                                            BMCR_ANENABLE);
2744                                 udelay(10);
2745                                 netif_carrier_off(tp->dev);
2746                         }
2747                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2748                         bmcr = new_bmcr;
2749                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2750                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2751                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2752                 }
2753         }
2754
2755         if (bmsr & BMSR_LSTATUS) {
2756                 current_speed = SPEED_1000;
2757                 current_link_up = 1;
2758                 if (bmcr & BMCR_FULLDPLX)
2759                         current_duplex = DUPLEX_FULL;
2760                 else
2761                         current_duplex = DUPLEX_HALF;
2762
2763                 if (bmcr & BMCR_ANENABLE) {
2764                         u32 local_adv, remote_adv, common;
2765
2766                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2767                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
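                        /* The negotiated result is the intersection of what
                         * we advertised and what the link partner advertised.
                         */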
2768                         common = local_adv & remote_adv;
2769                         if (common & (ADVERTISE_1000XHALF |
2770                                       ADVERTISE_1000XFULL)) {
2771                                 if (common & ADVERTISE_1000XFULL)
2772                                         current_duplex = DUPLEX_FULL;
2773                                 else
2774                                         current_duplex = DUPLEX_HALF;
2775
2776                                 tg3_setup_flow_control(tp, local_adv,
2777                                                        remote_adv);
2778                         } else
2780                                 current_link_up = 0;
2781                 }
2782         }
2783
2784         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2785         if (tp->link_config.active_duplex == DUPLEX_HALF)
2786                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2787
2788         tw32_f(MAC_MODE, tp->mac_mode);
2789         udelay(40);
2790
2791         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2792
2793         tp->link_config.active_speed = current_speed;
2794         tp->link_config.active_duplex = current_duplex;
2795
2796         if (current_link_up != netif_carrier_ok(tp->dev)) {
2797                 if (current_link_up)
2798                         netif_carrier_on(tp->dev);
2799                 else {
2800                         netif_carrier_off(tp->dev);
2801                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2802                 }
2803                 tg3_link_report(tp);
2804         }
2805         return err;
2806 }
2807
2808 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2809 {
2810         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2811                 /* Give autoneg time to complete. */
2812                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2813                 return;
2814         }
2815         if (!netif_carrier_ok(tp->dev) &&
2816             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2817                 u32 bmcr;
2818
2819                 tg3_readphy(tp, MII_BMCR, &bmcr);
2820                 if (bmcr & BMCR_ANENABLE) {
2821                         u32 phy1, phy2;
2822
2823                         /* Select shadow register 0x1f */
2824                         tg3_writephy(tp, 0x1c, 0x7c00);
2825                         tg3_readphy(tp, 0x1c, &phy1);
2826
2827                         /* Select expansion interrupt status register */
2828                         tg3_writephy(tp, 0x17, 0x0f01);
2829                         tg3_readphy(tp, 0x15, &phy2);
2830                         tg3_readphy(tp, 0x15, &phy2);
2831
2832                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2833                                 /* We have signal detect and are not receiving
2834                                  * config code words; the link is up by parallel
2835                                  * detection.
2836                                  */
2837
2838                                 bmcr &= ~BMCR_ANENABLE;
2839                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2840                                 tg3_writephy(tp, MII_BMCR, bmcr);
2841                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2842                         }
2843                 }
2844         }
2845         else if (netif_carrier_ok(tp->dev) &&
2846                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2847                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2848                 u32 phy2;
2849
2850                 /* Select expansion interrupt status register */
2851                 tg3_writephy(tp, 0x17, 0x0f01);
2852                 tg3_readphy(tp, 0x15, &phy2);
2853                 if (phy2 & 0x20) {
2854                         u32 bmcr;
2855
2856                         /* Config code words received, turn on autoneg. */
2857                         tg3_readphy(tp, MII_BMCR, &bmcr);
2858                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2859
2860                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2861
2862                 }
2863         }
2864 }
2865
2866 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2867 {
2868         int err;
2869
2870         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2871                 err = tg3_setup_fiber_phy(tp, force_reset);
2872         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2873                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2874         } else {
2875                 err = tg3_setup_copper_phy(tp, force_reset);
2876         }
2877
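        /* Gigabit half duplex needs a much larger slot time value (carrier
         * extension); every other speed/duplex combination uses the
         * standard value.
         */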
2878         if (tp->link_config.active_speed == SPEED_1000 &&
2879             tp->link_config.active_duplex == DUPLEX_HALF)
2880                 tw32(MAC_TX_LENGTHS,
2881                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2882                       (6 << TX_LENGTHS_IPG_SHIFT) |
2883                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2884         else
2885                 tw32(MAC_TX_LENGTHS,
2886                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2887                       (6 << TX_LENGTHS_IPG_SHIFT) |
2888                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2889
2890         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2891                 if (netif_carrier_ok(tp->dev)) {
2892                         tw32(HOSTCC_STAT_COAL_TICKS,
2893                              tp->coal.stats_block_coalesce_usecs);
2894                 } else {
2895                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2896                 }
2897         }
2898
2899         return err;
2900 }
2901
2902 /* Tigon3 never reports partial packet sends.  So we do not
2903  * need special logic to handle SKBs that have not had all
2904  * of their frags sent yet, like SunGEM does.
2905  */
2906 static void tg3_tx(struct tg3 *tp)
2907 {
2908         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2909         u32 sw_idx = tp->tx_cons;
2910
2911         while (sw_idx != hw_idx) {
2912                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2913                 struct sk_buff *skb = ri->skb;
2914                 int i;
2915
2916                 if (unlikely(skb == NULL))
2917                         BUG();
2918
2919                 pci_unmap_single(tp->pdev,
2920                                  pci_unmap_addr(ri, mapping),
2921                                  skb_headlen(skb),
2922                                  PCI_DMA_TODEVICE);
2923
2924                 ri->skb = NULL;
2925
2926                 sw_idx = NEXT_TX(sw_idx);
2927
2928                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2929                         if (unlikely(sw_idx == hw_idx))
2930                                 BUG();
2931
2932                         ri = &tp->tx_buffers[sw_idx];
2933                         if (unlikely(ri->skb != NULL))
2934                                 BUG();
2935
2936                         pci_unmap_page(tp->pdev,
2937                                        pci_unmap_addr(ri, mapping),
2938                                        skb_shinfo(skb)->frags[i].size,
2939                                        PCI_DMA_TODEVICE);
2940
2941                         sw_idx = NEXT_TX(sw_idx);
2942                 }
2943
2944                 dev_kfree_skb(skb);
2945         }
2946
2947         tp->tx_cons = sw_idx;
2948
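        /* The queue may be stopped by tg3_start_xmit() while descriptors are
         * being freed here; re-check under tx_lock so a wakeup cannot race
         * with a concurrent stop.
         */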
2949         if (unlikely(netif_queue_stopped(tp->dev))) {
2950                 spin_lock(&tp->tx_lock);
2951                 if (netif_queue_stopped(tp->dev) &&
2952                     (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2953                         netif_wake_queue(tp->dev);
2954                 spin_unlock(&tp->tx_lock);
2955         }
2956 }
2957
2958 /* Returns size of skb allocated or < 0 on error.
2959  *
2960  * We only need to fill in the address because the other members
2961  * of the RX descriptor are invariant, see tg3_init_rings.
2962  *
2963  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2964  * posting buffers we only dirty the first cache line of the RX
2965  * descriptor (containing the address).  Whereas for the RX status
2966  * buffers the cpu only reads the last cacheline of the RX descriptor
2967  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2968  */
2969 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2970                             int src_idx, u32 dest_idx_unmasked)
2971 {
2972         struct tg3_rx_buffer_desc *desc;
2973         struct ring_info *map, *src_map;
2974         struct sk_buff *skb;
2975         dma_addr_t mapping;
2976         int skb_size, dest_idx;
2977
2978         src_map = NULL;
2979         switch (opaque_key) {
2980         case RXD_OPAQUE_RING_STD:
2981                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2982                 desc = &tp->rx_std[dest_idx];
2983                 map = &tp->rx_std_buffers[dest_idx];
2984                 if (src_idx >= 0)
2985                         src_map = &tp->rx_std_buffers[src_idx];
2986                 skb_size = tp->rx_pkt_buf_sz;
2987                 break;
2988
2989         case RXD_OPAQUE_RING_JUMBO:
2990                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2991                 desc = &tp->rx_jumbo[dest_idx];
2992                 map = &tp->rx_jumbo_buffers[dest_idx];
2993                 if (src_idx >= 0)
2994                         src_map = &tp->rx_jumbo_buffers[src_idx];
2995                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2996                 break;
2997
2998         default:
2999                 return -EINVAL;
3000         }
3001
3002         /* Do not overwrite any of the map or rp information
3003          * until we are sure we can commit to a new buffer.
3004          *
3005          * Callers depend upon this behavior and assume that
3006          * we leave everything unchanged if we fail.
3007          */
3008         skb = dev_alloc_skb(skb_size);
3009         if (skb == NULL)
3010                 return -ENOMEM;
3011
3012         skb->dev = tp->dev;
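        /* rx_offset is normally 2, which makes the IP header land on a
         * 4-byte boundary once the 14-byte Ethernet header is pulled.
         */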
3013         skb_reserve(skb, tp->rx_offset);
3014
3015         mapping = pci_map_single(tp->pdev, skb->data,
3016                                  skb_size - tp->rx_offset,
3017                                  PCI_DMA_FROMDEVICE);
3018
3019         map->skb = skb;
3020         pci_unmap_addr_set(map, mapping, mapping);
3021
3022         if (src_map != NULL)
3023                 src_map->skb = NULL;
3024
3025         desc->addr_hi = ((u64)mapping >> 32);
3026         desc->addr_lo = ((u64)mapping & 0xffffffff);
3027
3028         return skb_size;
3029 }
3030
3031 /* We only need to copy the address over because the other
3032  * members of the RX descriptor are invariant.  See notes above
3033  * tg3_alloc_rx_skb for full details.
3034  */
3035 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3036                            int src_idx, u32 dest_idx_unmasked)
3037 {
3038         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3039         struct ring_info *src_map, *dest_map;
3040         int dest_idx;
3041
3042         switch (opaque_key) {
3043         case RXD_OPAQUE_RING_STD:
3044                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3045                 dest_desc = &tp->rx_std[dest_idx];
3046                 dest_map = &tp->rx_std_buffers[dest_idx];
3047                 src_desc = &tp->rx_std[src_idx];
3048                 src_map = &tp->rx_std_buffers[src_idx];
3049                 break;
3050
3051         case RXD_OPAQUE_RING_JUMBO:
3052                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3053                 dest_desc = &tp->rx_jumbo[dest_idx];
3054                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3055                 src_desc = &tp->rx_jumbo[src_idx];
3056                 src_map = &tp->rx_jumbo_buffers[src_idx];
3057                 break;
3058
3059         default:
3060                 return;
3061         }
3062
3063         dest_map->skb = src_map->skb;
3064         pci_unmap_addr_set(dest_map, mapping,
3065                            pci_unmap_addr(src_map, mapping));
3066         dest_desc->addr_hi = src_desc->addr_hi;
3067         dest_desc->addr_lo = src_desc->addr_lo;
3068
3069         src_map->skb = NULL;
3070 }
3071
3072 #if TG3_VLAN_TAG_USED
3073 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3074 {
3075         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3076 }
3077 #endif
3078
3079 /* The RX ring scheme is composed of multiple rings which post fresh
3080  * buffers to the chip, and one special ring the chip uses to report
3081  * status back to the host.
3082  *
3083  * The special ring reports the status of received packets to the
3084  * host.  The chip does not write into the original descriptor the
3085  * RX buffer was obtained from.  The chip simply takes the original
3086  * descriptor as provided by the host, updates the status and length
3087  * field, then writes this into the next status ring entry.
3088  *
3089  * Each ring the host uses to post buffers to the chip is described
3090  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3091  * it is first placed into the on-chip ram.  When the packet's length
3092  * is known, it walks down the TG3_BDINFO entries to select the ring.
3093  * Each TG3_BDINFO has a MAXLEN field; the first TG3_BDINFO whose
3094  * MAXLEN covers the new packet's length is chosen.
3095  *
3096  * The "separate ring for rx status" scheme may sound queer, but it makes
3097  * sense from a cache coherency perspective.  If only the host writes
3098  * to the buffer post rings, and only the chip writes to the rx status
3099  * rings, then cache lines never move beyond shared-modified state.
3100  * If both the host and chip were to write into the same ring, cache line
3101  * eviction could occur since both entities want it in an exclusive state.
3102  */
3103 static int tg3_rx(struct tg3 *tp, int budget)
3104 {
3105         u32 work_mask;
3106         u32 sw_idx = tp->rx_rcb_ptr;
3107         u16 hw_idx;
3108         int received;
3109
3110         hw_idx = tp->hw_status->idx[0].rx_producer;
3111         /*
3112          * We need to order the read of hw_idx and the read of
3113          * the opaque cookie.
3114          */
3115         rmb();
3116         work_mask = 0;
3117         received = 0;
3118         while (sw_idx != hw_idx && budget > 0) {
3119                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3120                 unsigned int len;
3121                 struct sk_buff *skb;
3122                 dma_addr_t dma_addr;
3123                 u32 opaque_key, desc_idx, *post_ptr;
3124
3125                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3126                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3127                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3128                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3129                                                   mapping);
3130                         skb = tp->rx_std_buffers[desc_idx].skb;
3131                         post_ptr = &tp->rx_std_ptr;
3132                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3133                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3134                                                   mapping);
3135                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3136                         post_ptr = &tp->rx_jumbo_ptr;
3137                 } else {
3139                         goto next_pkt_nopost;
3140                 }
3141
3142                 work_mask |= opaque_key;
3143
3144                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3145                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3146                 drop_it:
3147                         tg3_recycle_rx(tp, opaque_key,
3148                                        desc_idx, *post_ptr);
3149                 drop_it_no_recycle:
3150                         /* Other statistics kept track of by card. */
3151                         tp->net_stats.rx_dropped++;
3152                         goto next_pkt;
3153                 }
3154
3155                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3156
3157                 /* rx_offset != 2 iff this is a 5701 card running
3158                  * in PCI-X mode [see tg3_get_invariants()]
3159                  */
3160                 if (len > RX_COPY_THRESHOLD &&
3161                     tp->rx_offset == 2) {
3162                         int skb_size;
3163
3164                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3165                                                     desc_idx, *post_ptr);
3166                         if (skb_size < 0)
3167                                 goto drop_it;
3168
3169                         pci_unmap_single(tp->pdev, dma_addr,
3170                                          skb_size - tp->rx_offset,
3171                                          PCI_DMA_FROMDEVICE);
3172
3173                         skb_put(skb, len);
3174                 } else {
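                        /* Copy-break: for a small packet it is cheaper to
                         * copy the data into a fresh skb and recycle the
                         * original DMA buffer than to unmap the buffer and
                         * allocate a replacement.
                         */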
3175                         struct sk_buff *copy_skb;
3176
3177                         tg3_recycle_rx(tp, opaque_key,
3178                                        desc_idx, *post_ptr);
3179
3180                         copy_skb = dev_alloc_skb(len + 2);
3181                         if (copy_skb == NULL)
3182                                 goto drop_it_no_recycle;
3183
3184                         copy_skb->dev = tp->dev;
3185                         skb_reserve(copy_skb, 2);
3186                         skb_put(copy_skb, len);
3187                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3188                         memcpy(copy_skb->data, skb->data, len);
3189                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3190
3191                         /* We'll reuse the original ring buffer. */
3192                         skb = copy_skb;
3193                 }
3194
3195                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3196                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3197                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3198                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3199                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3200                 else
3201                         skb->ip_summed = CHECKSUM_NONE;
3202
3203                 skb->protocol = eth_type_trans(skb, tp->dev);
3204 #if TG3_VLAN_TAG_USED
3205                 if (tp->vlgrp != NULL &&
3206                     desc->type_flags & RXD_FLAG_VLAN) {
3207                         tg3_vlan_rx(tp, skb,
3208                                     desc->err_vlan & RXD_VLAN_MASK);
3209                 } else
3210 #endif
3211                         netif_receive_skb(skb);
3212
3213                 tp->dev->last_rx = jiffies;
3214                 received++;
3215                 budget--;
3216
3217 next_pkt:
3218                 (*post_ptr)++;
3219 next_pkt_nopost:
3220                 sw_idx++;
3221                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3222
3223                 /* Refresh hw_idx to see if there is new work */
3224                 if (sw_idx == hw_idx) {
3225                         hw_idx = tp->hw_status->idx[0].rx_producer;
3226                         rmb();
3227                 }
3228         }
3229
3230         /* ACK the status ring. */
3231         tp->rx_rcb_ptr = sw_idx;
3232         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3233
3234         /* Refill RX ring(s). */
3235         if (work_mask & RXD_OPAQUE_RING_STD) {
3236                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3237                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3238                              sw_idx);
3239         }
3240         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3241                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3242                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3243                              sw_idx);
3244         }
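        /* On architectures where MMIO writes from different CPUs can be
         * reordered around spinlocks, mmiowb() keeps the mailbox writes
         * ordered ahead of any subsequent unlock.
         */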
3245         mmiowb();
3246
3247         return received;
3248 }
3249
3250 static int tg3_poll(struct net_device *netdev, int *budget)
3251 {
3252         struct tg3 *tp = netdev_priv(netdev);
3253         struct tg3_hw_status *sblk = tp->hw_status;
3254         int done;
3255
3256         /* handle link change and other phy events */
3257         if (!(tp->tg3_flags &
3258               (TG3_FLAG_USE_LINKCHG_REG |
3259                TG3_FLAG_POLL_SERDES))) {
3260                 if (sblk->status & SD_STATUS_LINK_CHG) {
3261                         sblk->status = SD_STATUS_UPDATED |
3262                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3263                         spin_lock(&tp->lock);
3264                         tg3_setup_phy(tp, 0);
3265                         spin_unlock(&tp->lock);
3266                 }
3267         }
3268
3269         /* run TX completion thread */
3270         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3271                 tg3_tx(tp);
3272         }
3273
3274         /* run RX thread, within the bounds set by NAPI.
3275          * All RX "locking" is done by ensuring outside
3276          * code synchronizes with dev->poll()
3277          */
3278         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3279                 int orig_budget = *budget;
3280                 int work_done;
3281
3282                 if (orig_budget > netdev->quota)
3283                         orig_budget = netdev->quota;
3284
3285                 work_done = tg3_rx(tp, orig_budget);
3286
3287                 *budget -= work_done;
3288                 netdev->quota -= work_done;
3289         }
3290
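        /* In tagged-status mode, last_tag is written back to the interrupt
         * mailbox when interrupts are re-enabled (see tg3_restart_ints) to
         * tell the chip how much work has been processed, so record it
         * before checking for more work.
         */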
3291         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3292                 tp->last_tag = sblk->status_tag;
3293                 rmb();
3294         } else
3295                 sblk->status &= ~SD_STATUS_UPDATED;
3296
3297         /* if no more work, tell net stack and NIC we're done */
3298         done = !tg3_has_work(tp);
3299         if (done) {
3300                 netif_rx_complete(netdev);
3301                 tg3_restart_ints(tp);
3302         }
3303
3304         return (done ? 0 : 1);
3305 }
3306
3307 static void tg3_irq_quiesce(struct tg3 *tp)
3308 {
3309         BUG_ON(tp->irq_sync);
3310
3311         tp->irq_sync = 1;
3312         smp_mb();
3313
3314         synchronize_irq(tp->pdev->irq);
3315 }
3316
3317 static inline int tg3_irq_sync(struct tg3 *tp)
3318 {
3319         return tp->irq_sync;
3320 }
3321
3322 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3323  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3324  * with as well.  Most of the time, this is not necessary except when
3325  * shutting down the device.
3326  */
3327 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3328 {
3329         if (irq_sync)
3330                 tg3_irq_quiesce(tp);
3331         spin_lock_bh(&tp->lock);
3332         spin_lock(&tp->tx_lock);
3333 }
3334
3335 static inline void tg3_full_unlock(struct tg3 *tp)
3336 {
3337         spin_unlock(&tp->tx_lock);
3338         spin_unlock_bh(&tp->lock);
3339 }
3340
3341 /* MSI ISR - No need to check for interrupt sharing and no need to
3342  * flush status block and interrupt mailbox. PCI ordering rules
3343  * guarantee that MSI will arrive after the status block.
3344  */
3345 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3346 {
3347         struct net_device *dev = dev_id;
3348         struct tg3 *tp = netdev_priv(dev);
3349
3350         prefetch(tp->hw_status);
3351         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3352         /*
3353          * Writing any value to intr-mbox-0 clears PCI INTA# and
3354          * chip-internal interrupt pending events.
3355          * Writing non-zero to intr-mbox-0 additionally tells the
3356          * NIC to stop sending us irqs, engaging "in-intr-handler"
3357          * event coalescing.
3358          */
3359         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3360         if (likely(!tg3_irq_sync(tp)))
3361                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3362
3363         return IRQ_RETVAL(1);
3364 }
3365
3366 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3367 {
3368         struct net_device *dev = dev_id;
3369         struct tg3 *tp = netdev_priv(dev);
3370         struct tg3_hw_status *sblk = tp->hw_status;
3371         unsigned int handled = 1;
3372
3373         /* In INTx mode, it is possible for the interrupt to arrive at
3374          * the CPU before the status block (posted prior to the interrupt)
3375          * has reached host memory.  Reading the PCI State register confirms
3376          * whether the interrupt is ours and flushes the status block.
3377          */
3378         if ((sblk->status & SD_STATUS_UPDATED) ||
3379             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3380                 /*
3381                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3382                  * chip-internal interrupt pending events.
3383          * Writing non-zero to intr-mbox-0 additionally tells the
3384                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3385                  * event coalescing.
3386                  */
3387                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3388                              0x00000001);
3389                 if (tg3_irq_sync(tp))
3390                         goto out;
3391                 sblk->status &= ~SD_STATUS_UPDATED;
3392                 if (likely(tg3_has_work(tp))) {
3393                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3394                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3395                 } else {
3396                         /* No work, shared interrupt perhaps?  re-enable
3397                          * interrupts, and flush that PCI write
3398                          */
3399                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3400                                 0x00000000);
3401                 }
3402         } else {        /* shared interrupt */
3403                 handled = 0;
3404         }
3405 out:
3406         return IRQ_RETVAL(handled);
3407 }
3408
3409 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3410 {
3411         struct net_device *dev = dev_id;
3412         struct tg3 *tp = netdev_priv(dev);
3413         struct tg3_hw_status *sblk = tp->hw_status;
3414         unsigned int handled = 1;
3415
3416         /* In INTx mode, it is possible for the interrupt to arrive at
3417          * the CPU before the status block (posted prior to the interrupt)
3418          * has reached host memory.  Reading the PCI State register confirms
3419          * whether the interrupt is ours and flushes the status block.
3420          */
3421         if ((sblk->status_tag != tp->last_tag) ||
3422             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3423                 /*
3424          * Writing any value to intr-mbox-0 clears PCI INTA# and
3425          * chip-internal interrupt pending events.
3426          * Writing non-zero to intr-mbox-0 additionally tells the
3427                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3428                  * event coalescing.
3429                  */
3430                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3431                              0x00000001);
3432                 if (tg3_irq_sync(tp))
3433                         goto out;
3434                 if (netif_rx_schedule_prep(dev)) {
3435                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3436                         /* Update last_tag to mark that this status has been
3437                          * seen.  Because the interrupt may be shared, we may be
3438                          * racing with tg3_poll(), so only update last_tag
3439                          * if tg3_poll() is not scheduled.
3440                          */
3441                         tp->last_tag = sblk->status_tag;
3442                         __netif_rx_schedule(dev);
3443                 }
3444         } else {        /* shared interrupt */
3445                 handled = 0;
3446         }
3447 out:
3448         return IRQ_RETVAL(handled);
3449 }
3450
3451 /* ISR for interrupt test */
3452 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3453                 struct pt_regs *regs)
3454 {
3455         struct net_device *dev = dev_id;
3456         struct tg3 *tp = netdev_priv(dev);
3457         struct tg3_hw_status *sblk = tp->hw_status;
3458
3459         if ((sblk->status & SD_STATUS_UPDATED) ||
3460             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3461                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3462                              0x00000001);
3463                 return IRQ_RETVAL(1);
3464         }
3465         return IRQ_RETVAL(0);
3466 }
3467
3468 static int tg3_init_hw(struct tg3 *);
3469 static int tg3_halt(struct tg3 *, int, int);
3470
3471 #ifdef CONFIG_NET_POLL_CONTROLLER
3472 static void tg3_poll_controller(struct net_device *dev)
3473 {
3474         struct tg3 *tp = netdev_priv(dev);
3475
3476         tg3_interrupt(tp->pdev->irq, dev, NULL);
3477 }
3478 #endif
3479
3480 static void tg3_reset_task(void *_data)
3481 {
3482         struct tg3 *tp = _data;
3483         unsigned int restart_timer;
3484
3485         tg3_full_lock(tp, 0);
3486         tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3487
3488         if (!netif_running(tp->dev)) {
3489                 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3490                 tg3_full_unlock(tp);
3491                 return;
3492         }
3493
3494         tg3_full_unlock(tp);
3495
3496         tg3_netif_stop(tp);
3497
3498         tg3_full_lock(tp, 1);
3499
3500         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3501         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3502
3503         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3504         tg3_init_hw(tp);
3505
3506         tg3_netif_start(tp);
3507
3508         if (restart_timer)
3509                 mod_timer(&tp->timer, jiffies + 1);
3510
3511         tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3512
3513         tg3_full_unlock(tp);
3514 }
3515
3516 static void tg3_tx_timeout(struct net_device *dev)
3517 {
3518         struct tg3 *tp = netdev_priv(dev);
3519
3520         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3521                dev->name);
3522
3523         schedule_work(&tp->reset_task);
3524 }
3525
3526 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3527 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3528 {
3529         u32 base = (u32) mapping & 0xffffffff;
3530
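        /* Only an address in the last few KB below a 4GB boundary can
         * straddle it (0xffffdcc0 leaves room for the largest frame); the
         * 32-bit wrap of base + len + 8 detects the actual crossing.
         */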
3531         return ((base > 0xffffdcc0) &&
3532                 (base + len + 8 < base));
3533 }
3534
3535 /* Test for DMA addresses > 40-bit */
3536 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3537                                           int len)
3538 {
3539 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3540         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3541                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3542         return 0;
3543 #else
3544         return 0;
3545 #endif
3546 }
3547
3548 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3549
3550 /* Workaround 4GB and 40-bit hardware DMA bugs. */
3551 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3552                                        u32 last_plus_one, u32 *start,
3553                                        u32 base_flags, u32 mss)
3554 {
3555         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3556         dma_addr_t new_addr = 0;
3557         u32 entry = *start;
3558         int i, ret = 0;
3559
3560         if (!new_skb) {
3561                 ret = -1;
3562         } else {
3563                 /* New SKB is guaranteed to be linear. */
3564                 entry = *start;
3565                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3566                                           PCI_DMA_TODEVICE);
3567                 /* Make sure new skb does not cross any 4G boundaries.
3568                  * Drop the packet if it does.
3569                  */
3570                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3571                         ret = -1;
3572                         dev_kfree_skb(new_skb);
3573                         new_skb = NULL;
3574                 } else {
3575                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3576                                     base_flags, 1 | (mss << 1));
3577                         *start = NEXT_TX(entry);
3578                 }
3579         }
3580
3581         /* Now clean up the sw ring entries. */
3582         i = 0;
3583         while (entry != last_plus_one) {
3584                 int len;
3585
3586                 if (i == 0)
3587                         len = skb_headlen(skb);
3588                 else
3589                         len = skb_shinfo(skb)->frags[i-1].size;
3590                 pci_unmap_single(tp->pdev,
3591                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3592                                  len, PCI_DMA_TODEVICE);
3593                 if (i == 0) {
3594                         tp->tx_buffers[entry].skb = new_skb;
3595                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3596                 } else {
3597                         tp->tx_buffers[entry].skb = NULL;
3598                 }
3599                 entry = NEXT_TX(entry);
3600                 i++;
3601         }
3602
3603         dev_kfree_skb(skb);
3604
3605         return ret;
3606 }
3607
3608 static void tg3_set_txd(struct tg3 *tp, int entry,
3609                         dma_addr_t mapping, int len, u32 flags,
3610                         u32 mss_and_is_end)
3611 {
3612         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3613         int is_end = (mss_and_is_end & 0x1);
3614         u32 mss = (mss_and_is_end >> 1);
3615         u32 vlan_tag = 0;
3616
3617         if (is_end)
3618                 flags |= TXD_FLAG_END;
3619         if (flags & TXD_FLAG_VLAN) {
3620                 vlan_tag = flags >> 16;
3621                 flags &= 0xffff;
3622         }
3623         vlan_tag |= (mss << TXD_MSS_SHIFT);
3624
3625         txd->addr_hi = ((u64) mapping >> 32);
3626         txd->addr_lo = ((u64) mapping & 0xffffffff);
3627         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3628         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3629 }
3630
3631 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3632 {
3633         struct tg3 *tp = netdev_priv(dev);
3634         dma_addr_t mapping;
3635         u32 len, entry, base_flags, mss;
3636         int would_hit_hwbug;
3637
3638         len = skb_headlen(skb);
3639
3640         /* No BH disabling for tx_lock here.  We are running in BH disabled
3641          * context and TX reclaim runs via tp->poll inside of a software
3642          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3643          * no IRQ context deadlocks to worry about either.  Rejoice!
3644          */
3645         if (!spin_trylock(&tp->tx_lock))
3646                 return NETDEV_TX_LOCKED; 
3647
3648         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3649                 if (!netif_queue_stopped(dev)) {
3650                         netif_stop_queue(dev);
3651
3652                         /* This is a hard error, log it. */
3653                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3654                                "queue awake!\n", dev->name);
3655                 }
3656                 spin_unlock(&tp->tx_lock);
3657                 return NETDEV_TX_BUSY;
3658         }
3659
3660         entry = tp->tx_prod;
3661         base_flags = 0;
3662         if (skb->ip_summed == CHECKSUM_HW)
3663                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3664 #if TG3_TSO_SUPPORT != 0
3665         mss = 0;
3666         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3667             (mss = skb_shinfo(skb)->tso_size) != 0) {
3668                 int tcp_opt_len, ip_tcp_len;
3669
3670                 if (skb_header_cloned(skb) &&
3671                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3672                         dev_kfree_skb(skb);
3673                         goto out_unlock;
3674                 }
3675
3676                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3677                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3678
3679                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3680                                TXD_FLAG_CPU_POST_DMA);
3681
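                /* tot_len is set to the size of one full segment.  Chips
                 * with full hardware TSO regenerate the TCP checksum
                 * themselves; older chips need the checksum field seeded
                 * with the pseudo-header sum (length omitted) so the
                 * firmware can finish it for each segment.
                 */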
3682                 skb->nh.iph->check = 0;
3683                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3684                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3685                         skb->h.th->check = 0;
3686                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3687                 } else {
3689                         skb->h.th->check =
3690                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3691                                                    skb->nh.iph->daddr,
3692                                                    0, IPPROTO_TCP, 0);
3693                 }
3694
3695                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3696                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3697                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3698                                 int tsflags;
3699
3700                                 tsflags = ((skb->nh.iph->ihl - 5) +
3701                                            (tcp_opt_len >> 2));
3702                                 mss |= (tsflags << 11);
3703                         }
3704                 } else {
3705                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3706                                 int tsflags;
3707
3708                                 tsflags = ((skb->nh.iph->ihl - 5) +
3709                                            (tcp_opt_len >> 2));
3710                                 base_flags |= tsflags << 12;
3711                         }
3712                 }
3713         }
3714 #else
3715         mss = 0;
3716 #endif
3717 #if TG3_VLAN_TAG_USED
3718         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3719                 base_flags |= (TXD_FLAG_VLAN |
3720                                (vlan_tx_tag_get(skb) << 16));
3721 #endif
3722
3723         /* Queue skb data, a.k.a. the main skb fragment. */
3724         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3725
3726         tp->tx_buffers[entry].skb = skb;
3727         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3728
3729         would_hit_hwbug = 0;
3730
3731         if (tg3_4g_overflow_test(mapping, len))
3732                 would_hit_hwbug = 1;
3733
3734         tg3_set_txd(tp, entry, mapping, len, base_flags,
3735                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3736
3737         entry = NEXT_TX(entry);
3738
3739         /* Now loop through additional data fragments, and queue them. */
3740         if (skb_shinfo(skb)->nr_frags > 0) {
3741                 unsigned int i, last;
3742
3743                 last = skb_shinfo(skb)->nr_frags - 1;
3744                 for (i = 0; i <= last; i++) {
3745                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3746
3747                         len = frag->size;
3748                         mapping = pci_map_page(tp->pdev,
3749                                                frag->page,
3750                                                frag->page_offset,
3751                                                len, PCI_DMA_TODEVICE);
3752
3753                         tp->tx_buffers[entry].skb = NULL;
3754                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3755
3756                         if (tg3_4g_overflow_test(mapping, len))
3757                                 would_hit_hwbug = 1;
3758
3759                         if (tg3_40bit_overflow_test(tp, mapping, len))
3760                                 would_hit_hwbug = 1;
3761
3762                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3763                                 tg3_set_txd(tp, entry, mapping, len,
3764                                             base_flags, (i == last)|(mss << 1));
3765                         else
3766                                 tg3_set_txd(tp, entry, mapping, len,
3767                                             base_flags, (i == last));
3768
3769                         entry = NEXT_TX(entry);
3770                 }
3771         }
3772
3773         if (would_hit_hwbug) {
3774                 u32 last_plus_one = entry;
3775                 u32 start;
3776
3777                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3778                 start &= (TG3_TX_RING_SIZE - 1);
3779
3780                 /* If the workaround fails due to memory/mapping
3781                  * failure, silently drop this packet.
3782                  */
3783                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
3784                                                 &start, base_flags, mss))
3785                         goto out_unlock;
3786
3787                 entry = start;
3788         }
3789
3790         /* Packets are ready, update Tx producer idx local and on card. */
3791         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3792
3793         tp->tx_prod = entry;
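        /* Stop the queue once there is no longer room for a maximally
         * fragmented skb; wake it right back up if completions have
         * already freed enough descriptors.
         */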
3794         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3795                 netif_stop_queue(dev);
3796                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3797                         netif_wake_queue(tp->dev);
3798         }
3799
3800 out_unlock:
3801         mmiowb();
3802         spin_unlock(&tp->tx_lock);
3803
3804         dev->trans_start = jiffies;
3805
3806         return NETDEV_TX_OK;
3807 }
3808
3809 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3810                                int new_mtu)
3811 {
3812         dev->mtu = new_mtu;
3813
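        /* 5780-class chips handle jumbo frames on the standard ring with
         * larger buffers and cannot use TSO at the same time, so TSO is
         * disabled instead of enabling the separate jumbo ring.
         */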
3814         if (new_mtu > ETH_DATA_LEN) {
3815                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
3816                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3817                         ethtool_op_set_tso(dev, 0);
3818                 } else
3820                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3821         } else {
3822                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3823                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
3824                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
3825         }
3826 }
3827
3828 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3829 {
3830         struct tg3 *tp = netdev_priv(dev);
3831
3832         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3833                 return -EINVAL;
3834
3835         if (!netif_running(dev)) {
3836                 /* We'll just catch it later when the
3837                  * device is up'd.
3838                  * device is brought up.
3839                 tg3_set_mtu(dev, tp, new_mtu);
3840                 return 0;
3841         }
3842
3843         tg3_netif_stop(tp);
3844
3845         tg3_full_lock(tp, 1);
3846
3847         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3848
3849         tg3_set_mtu(dev, tp, new_mtu);
3850
3851         tg3_init_hw(tp);
3852
3853         tg3_netif_start(tp);
3854
3855         tg3_full_unlock(tp);
3856
3857         return 0;
3858 }
3859
3860 /* Free up pending packets in all rx/tx rings.
3861  *
3862  * The chip has been shut down and the driver detached from
3863  * the networking, so no interrupts or new tx packets will
3864  * end up in the driver.  tp->{tx,}lock is not held and we are not
3865  * in an interrupt context and thus may sleep.
3866  */
3867 static void tg3_free_rings(struct tg3 *tp)
3868 {
3869         struct ring_info *rxp;
3870         int i;
3871
3872         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3873                 rxp = &tp->rx_std_buffers[i];
3874
3875                 if (rxp->skb == NULL)
3876                         continue;
3877                 pci_unmap_single(tp->pdev,
3878                                  pci_unmap_addr(rxp, mapping),
3879                                  tp->rx_pkt_buf_sz - tp->rx_offset,
3880                                  PCI_DMA_FROMDEVICE);
3881                 dev_kfree_skb_any(rxp->skb);
3882                 rxp->skb = NULL;
3883         }
3884
3885         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3886                 rxp = &tp->rx_jumbo_buffers[i];
3887
3888                 if (rxp->skb == NULL)
3889                         continue;
3890                 pci_unmap_single(tp->pdev,
3891                                  pci_unmap_addr(rxp, mapping),
3892                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3893                                  PCI_DMA_FROMDEVICE);
3894                 dev_kfree_skb_any(rxp->skb);
3895                 rxp->skb = NULL;
3896         }
3897
3898         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3899                 struct tx_ring_info *txp;
3900                 struct sk_buff *skb;
3901                 int j;
3902
3903                 txp = &tp->tx_buffers[i];
3904                 skb = txp->skb;
3905
3906                 if (skb == NULL) {
3907                         i++;
3908                         continue;
3909                 }
3910
3911                 pci_unmap_single(tp->pdev,
3912                                  pci_unmap_addr(txp, mapping),
3913                                  skb_headlen(skb),
3914                                  PCI_DMA_TODEVICE);
3915                 txp->skb = NULL;
3916
3917                 i++;
3918
3919                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3920                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3921                         pci_unmap_page(tp->pdev,
3922                                        pci_unmap_addr(txp, mapping),
3923                                        skb_shinfo(skb)->frags[j].size,
3924                                        PCI_DMA_TODEVICE);
3925                         i++;
3926                 }
3927
3928                 dev_kfree_skb_any(skb);
3929         }
3930 }
3931
3932 /* Initialize tx/rx rings for packet processing.
3933  *
3934  * The chip has been shut down and the driver detached from
3935  * the networking, so no interrupts or new tx packets will
3936  * end up in the driver.  tp->{tx,}lock are held and thus
3937  * we may not sleep.
3938  */
3939 static void tg3_init_rings(struct tg3 *tp)
3940 {
3941         u32 i;
3942
3943         /* Free up all the SKBs. */
3944         tg3_free_rings(tp);
3945
3946         /* Zero out all descriptors. */
3947         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3948         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3949         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3950         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3951
3952         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
3953         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
3954             (tp->dev->mtu > ETH_DATA_LEN))
3955                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3956
3957         /* Initialize invariants of the rings, we only set this
3958          * stuff once.  This works because the card does not
3959          * write into the rx buffer posting rings.
3960          */
3961         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3962                 struct tg3_rx_buffer_desc *rxd;
3963
3964                 rxd = &tp->rx_std[i];
3965                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
3966                         << RXD_LEN_SHIFT;
3967                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3968                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3969                                (i << RXD_OPAQUE_INDEX_SHIFT));
3970         }
3971
3972         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3973                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3974                         struct tg3_rx_buffer_desc *rxd;
3975
3976                         rxd = &tp->rx_jumbo[i];
3977                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3978                                 << RXD_LEN_SHIFT;
3979                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3980                                 RXD_FLAG_JUMBO;
3981                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3982                                (i << RXD_OPAQUE_INDEX_SHIFT));
3983                 }
3984         }
3985
3986         /* Now allocate fresh SKBs for each rx ring. */
3987         for (i = 0; i < tp->rx_pending; i++) {
3988                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3989                                      -1, i) < 0)
3990                         break;
3991         }
3992
3993         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3994                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3995                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3996                                              -1, i) < 0)
3997                                 break;
3998                 }
3999         }
4000 }
4001
4002 /*
4003  * Must not be invoked with interrupt sources disabled and
4004  * the hardware shut down.
4005  */
4006 static void tg3_free_consistent(struct tg3 *tp)
4007 {
4008         kfree(tp->rx_std_buffers);
4009         tp->rx_std_buffers = NULL;
4010         if (tp->rx_std) {
4011                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4012                                     tp->rx_std, tp->rx_std_mapping);
4013                 tp->rx_std = NULL;
4014         }
4015         if (tp->rx_jumbo) {
4016                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4017                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4018                 tp->rx_jumbo = NULL;
4019         }
4020         if (tp->rx_rcb) {
4021                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4022                                     tp->rx_rcb, tp->rx_rcb_mapping);
4023                 tp->rx_rcb = NULL;
4024         }
4025         if (tp->tx_ring) {
4026                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4027                         tp->tx_ring, tp->tx_desc_mapping);
4028                 tp->tx_ring = NULL;
4029         }
4030         if (tp->hw_status) {
4031                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4032                                     tp->hw_status, tp->status_mapping);
4033                 tp->hw_status = NULL;
4034         }
4035         if (tp->hw_stats) {
4036                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4037                                     tp->hw_stats, tp->stats_mapping);
4038                 tp->hw_stats = NULL;
4039         }
4040 }
4041
4042 /*
4043  * Must not be invoked with interrupt sources disabled and
4044  * the hardware shut down.  Can sleep.
4045  */
4046 static int tg3_alloc_consistent(struct tg3 *tp)
4047 {
4048         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4049                                       (TG3_RX_RING_SIZE +
4050                                        TG3_RX_JUMBO_RING_SIZE)) +
4051                                      (sizeof(struct tx_ring_info) *
4052                                       TG3_TX_RING_SIZE),
4053                                      GFP_KERNEL);
4054         if (!tp->rx_std_buffers)
4055                 return -ENOMEM;
4056
4057         memset(tp->rx_std_buffers, 0,
4058                (sizeof(struct ring_info) *
4059                 (TG3_RX_RING_SIZE +
4060                  TG3_RX_JUMBO_RING_SIZE)) +
4061                (sizeof(struct tx_ring_info) *
4062                 TG3_TX_RING_SIZE));
4063
4064         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4065         tp->tx_buffers = (struct tx_ring_info *)
4066                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4067
4068         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4069                                           &tp->rx_std_mapping);
4070         if (!tp->rx_std)
4071                 goto err_out;
4072
4073         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4074                                             &tp->rx_jumbo_mapping);
4075
4076         if (!tp->rx_jumbo)
4077                 goto err_out;
4078
4079         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4080                                           &tp->rx_rcb_mapping);
4081         if (!tp->rx_rcb)
4082                 goto err_out;
4083
4084         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4085                                            &tp->tx_desc_mapping);
4086         if (!tp->tx_ring)
4087                 goto err_out;
4088
4089         tp->hw_status = pci_alloc_consistent(tp->pdev,
4090                                              TG3_HW_STATUS_SIZE,
4091                                              &tp->status_mapping);
4092         if (!tp->hw_status)
4093                 goto err_out;
4094
4095         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4096                                             sizeof(struct tg3_hw_stats),
4097                                             &tp->stats_mapping);
4098         if (!tp->hw_stats)
4099                 goto err_out;
4100
4101         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4102         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4103
4104         return 0;
4105
4106 err_out:
4107         tg3_free_consistent(tp);
4108         return -ENOMEM;
4109 }
4110
4111 #define MAX_WAIT_CNT 1000
4112
4113 /* To stop a block, clear the enable bit and poll till it
4114  * clears.  tp->lock is held.
4115  */
4116 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4117 {
4118         unsigned int i;
4119         u32 val;
4120
4121         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4122                 switch (ofs) {
4123                 case RCVLSC_MODE:
4124                 case DMAC_MODE:
4125                 case MBFREE_MODE:
4126                 case BUFMGR_MODE:
4127                 case MEMARB_MODE:
4128                         /* We can't enable/disable these bits of the
4129                          * 5705/5750, just say success.
4130                          */
4131                         return 0;
4132
4133                 default:
4134                         break;
4135                 }
4136         }
4137
4138         val = tr32(ofs);
4139         val &= ~enable_bit;
4140         tw32_f(ofs, val);
4141
4142         for (i = 0; i < MAX_WAIT_CNT; i++) {
4143                 udelay(100);
4144                 val = tr32(ofs);
4145                 if ((val & enable_bit) == 0)
4146                         break;
4147         }
4148
4149         if (i == MAX_WAIT_CNT && !silent) {
4150                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4151                        "ofs=%lx enable_bit=%x\n",
4152                        ofs, enable_bit);
4153                 return -ENODEV;
4154         }
4155
4156         return 0;
4157 }
4158
4159 /* tp->lock is held. */
4160 static int tg3_abort_hw(struct tg3 *tp, int silent)
4161 {
4162         int i, err;
4163
4164         tg3_disable_ints(tp);
4165
4166         tp->rx_mode &= ~RX_MODE_ENABLE;
4167         tw32_f(MAC_RX_MODE, tp->rx_mode);
4168         udelay(10);
4169
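        /* Halt the receive-side blocks first, then the send-side and DMA
         * engines; individual timeouts are OR-ed into err so one stuck
         * block does not hide another.
         */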
4170         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4171         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4172         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4173         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4174         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4175         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4176
4177         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4178         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4179         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4180         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4181         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4182         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4183         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4184
4185         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4186         tw32_f(MAC_MODE, tp->mac_mode);
4187         udelay(40);
4188
4189         tp->tx_mode &= ~TX_MODE_ENABLE;
4190         tw32_f(MAC_TX_MODE, tp->tx_mode);
4191
4192         for (i = 0; i < MAX_WAIT_CNT; i++) {
4193                 udelay(100);
4194                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4195                         break;
4196         }
4197         if (i >= MAX_WAIT_CNT) {
4198                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4199                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4200                        tp->dev->name, tr32(MAC_TX_MODE));
4201                 err |= -ENODEV;
4202         }
4203
4204         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4205         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4206         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4207
4208         tw32(FTQ_RESET, 0xffffffff);
4209         tw32(FTQ_RESET, 0x00000000);
4210
4211         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4212         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4213
4214         if (tp->hw_status)
4215                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4216         if (tp->hw_stats)
4217                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4218
4219         return err;
4220 }
4221
4222 /* tp->lock is held. */
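/* Grab the NVRAM software arbitration semaphore (SWARB REQ1/GNT1) so the
 * driver and the on-chip firmware do not touch NVRAM at the same time.
 * Nested calls are reference counted via tp->nvram_lock_cnt.
 */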
4223 static int tg3_nvram_lock(struct tg3 *tp)
4224 {
4225         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4226                 int i;
4227
4228                 if (tp->nvram_lock_cnt == 0) {
4229                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4230                         for (i = 0; i < 8000; i++) {
4231                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4232                                         break;
4233                                 udelay(20);
4234                         }
4235                         if (i == 8000) {
4236                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4237                                 return -ENODEV;
4238                         }
4239                 }
4240                 tp->nvram_lock_cnt++;
4241         }
4242         return 0;
4243 }
4244
4245 /* tp->lock is held. */
4246 static void tg3_nvram_unlock(struct tg3 *tp)
4247 {
4248         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4249                 if (tp->nvram_lock_cnt > 0)
4250                         tp->nvram_lock_cnt--;
4251                 if (tp->nvram_lock_cnt == 0)
4252                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4253         }
4254 }
4255
4256 /* tp->lock is held. */
4257 static void tg3_enable_nvram_access(struct tg3 *tp)
4258 {
4259         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4260             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4261                 u32 nvaccess = tr32(NVRAM_ACCESS);
4262
4263                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4264         }
4265 }
4266
4267 /* tp->lock is held. */
4268 static void tg3_disable_nvram_access(struct tg3 *tp)
4269 {
4270         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4271             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4272                 u32 nvaccess = tr32(NVRAM_ACCESS);
4273
4274                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4275         }
4276 }
4277
4278 /* tp->lock is held. */
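/* Tell the boot firmware a reset is coming by writing MAGIC1 into the
 * firmware mailbox; with the newer ASF handshake, also record the driver
 * state that corresponds to this reset kind.
 */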
4279 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4280 {
4281         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4282                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4283                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4284
4285         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4286                 switch (kind) {
4287                 case RESET_KIND_INIT:
4288                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4289                                       DRV_STATE_START);
4290                         break;
4291
4292                 case RESET_KIND_SHUTDOWN:
4293                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4294                                       DRV_STATE_UNLOAD);
4295                         break;
4296
4297                 case RESET_KIND_SUSPEND:
4298                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4299                                       DRV_STATE_SUSPEND);
4300                         break;
4301
4302                 default:
4303                         break;
4304                 }
4305         }
4306 }
4307
4308 /* tp->lock is held. */
4309 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4310 {
4311         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4312                 switch (kind) {
4313                 case RESET_KIND_INIT:
4314                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4315                                       DRV_STATE_START_DONE);
4316                         break;
4317
4318                 case RESET_KIND_SHUTDOWN:
4319                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4320                                       DRV_STATE_UNLOAD_DONE);
4321                         break;
4322
4323                 default:
4324                         break;
4325                 }
4326         }
4327 }
4328
4329 /* tp->lock is held. */
4330 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4331 {
4332         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4333                 switch (kind) {
4334                 case RESET_KIND_INIT:
4335                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4336                                       DRV_STATE_START);
4337                         break;
4338
4339                 case RESET_KIND_SHUTDOWN:
4340                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4341                                       DRV_STATE_UNLOAD);
4342                         break;
4343
4344                 case RESET_KIND_SUSPEND:
4345                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4346                                       DRV_STATE_SUSPEND);
4347                         break;
4348
4349                 default:
4350                         break;
4351                 }
4352         }
4353 }
4354
4355 static void tg3_stop_fw(struct tg3 *);
4356
4357 /* tp->lock is held. */
4358 static int tg3_chip_reset(struct tg3 *tp)
4359 {
4360         u32 val;
4361         void (*write_op)(struct tg3 *, u32, u32);
4362         int i;
4363
4364         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4365                 tg3_nvram_lock(tp);
4366                 /* No matching tg3_nvram_unlock() after this because
4367                  * chip reset below will undo the nvram lock.
4368                  */
4369                 tp->nvram_lock_cnt = 0;
4370         }
4371
4372         /*
4373          * We must avoid the readl() that normally takes place.
4374          * It locks machines, causes machine checks, and other
4375          * fun things.  So, temporarily disable the 5701
4376          * hardware workaround, while we do the reset.
4377          */
4378         write_op = tp->write32;
4379         if (write_op == tg3_write_flush_reg32)
4380                 tp->write32 = tg3_write32;
4381
4382         /* do the reset */
4383         val = GRC_MISC_CFG_CORECLK_RESET;
4384
4385         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4386                 if (tr32(0x7e2c) == 0x60) {
4387                         tw32(0x7e2c, 0x20);
4388                 }
4389                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4390                         tw32(GRC_MISC_CFG, (1 << 29));
4391                         val |= (1 << 29);
4392                 }
4393         }
4394
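        /* Keep the GPHY powered across the core-clock reset on 5705 and
         * newer parts.
         */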
4395         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4396                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4397         tw32(GRC_MISC_CFG, val);
4398
4399         /* restore 5701 hardware bug workaround write method */
4400         tp->write32 = write_op;
4401
4402         /* Unfortunately, we have to delay before the PCI read back.
4403          * Some 575X chips will not even respond to a PCI cfg access
4404          * when the reset command is given to the chip.
4405          *
4406          * How do these hardware designers expect things to work
4407          * properly if the PCI write is posted for a long period
4408          * of time?  It is always necessary to have some method by
4409          * which a register read back can occur to push the write
4410          * out which does the reset.
4411          *
4412          * For most tg3 variants the trick below has worked.
4413          * Ho hum...
4414          */
4415         udelay(120);
4416
4417         /* Flush PCI posted writes.  The normal MMIO registers
4418          * are inaccessible at this time so this is the only
4419          * way to do this reliably (actually, this is no longer
4420          * the case, see above).  I tried to use indirect
4421          * register read/write but this upset some 5701 variants.
4422          */
4423         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4424
4425         udelay(120);
4426
4427         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4428                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4429                         int i;
4430                         u32 cfg_val;
4431
4432                         /* Wait for link training to complete.  */
4433                         for (i = 0; i < 5000; i++)
4434                                 udelay(100);
4435
4436                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4437                         pci_write_config_dword(tp->pdev, 0xc4,
4438                                                cfg_val | (1 << 15));
4439                 }
4440                 /* Set PCIE max payload size and clear error status.  */
4441                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4442         }
4443
4444         /* Re-enable indirect register accesses. */
4445         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4446                                tp->misc_host_ctrl);
4447
4448         /* Set MAX PCI retry to zero. */
4449         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4450         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4451             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4452                 val |= PCISTATE_RETRY_SAME_DMA;
4453         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4454
4455         pci_restore_state(tp->pdev);
4456
4457         /* Make sure PCI-X relaxed ordering bit is clear. */
4458         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4459         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4460         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4461
4462         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4463                 u32 val;
4464
4465                 /* Chip reset on 5780 will reset MSI enable bit,
4466                  * so need to restore it.
4467                  */
4468                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4469                         u16 ctrl;
4470
4471                         pci_read_config_word(tp->pdev,
4472                                              tp->msi_cap + PCI_MSI_FLAGS,
4473                                              &ctrl);
4474                         pci_write_config_word(tp->pdev,
4475                                               tp->msi_cap + PCI_MSI_FLAGS,
4476                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4477                         val = tr32(MSGINT_MODE);
4478                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4479                 }
4480
4481                 val = tr32(MEMARB_MODE);
4482                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4483
4484         } else
4485                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4486
4487         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4488                 tg3_stop_fw(tp);
4489                 tw32(0x5000, 0x400);
4490         }
4491
4492         tw32(GRC_MODE, tp->grc_mode);
4493
4494         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4495                 u32 val = tr32(0xc4);
4496
4497                 tw32(0xc4, val | (1 << 15));
4498         }
4499
4500         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4501             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4502                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4503                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4504                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4505                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4506         }
4507
4508         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4509                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4510                 tw32_f(MAC_MODE, tp->mac_mode);
4511         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4512                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4513                 tw32_f(MAC_MODE, tp->mac_mode);
4514         } else
4515                 tw32_f(MAC_MODE, 0);
4516         udelay(40);
4517
4518         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4519                 /* Wait for firmware initialization to complete. */
4520                 for (i = 0; i < 100000; i++) {
4521                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4522                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4523                                 break;
4524                         udelay(10);
4525                 }
4526                 if (i >= 100000) {
4527                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4528                                "firmware will not restart magic=%08x\n",
4529                                tp->dev->name, val);
4530                         return -ENODEV;
4531                 }
4532         }
4533
4534         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4535             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4536                 u32 val = tr32(0x7c00);
4537
4538                 tw32(0x7c00, val | (1 << 25));
4539         }
4540
4541         /* Reprobe ASF enable state.  */
4542         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4543         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4544         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4545         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4546                 u32 nic_cfg;
4547
4548                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4549                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4550                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4551                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4552                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4553                 }
4554         }
4555
4556         return 0;
4557 }
4558
4559 /* tp->lock is held. */
4560 static void tg3_stop_fw(struct tg3 *tp)
4561 {
4562         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4563                 u32 val;
4564                 int i;
4565
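                /* Post the PAUSE command in the firmware command mailbox,
                 * then set bit 14 of GRC_RX_CPU_EVENT to signal the RX CPU.
                 */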
4566                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4567                 val = tr32(GRC_RX_CPU_EVENT);
4568                 val |= (1 << 14);
4569                 tw32(GRC_RX_CPU_EVENT, val);
4570
4571                 /* Wait for RX cpu to ACK the event.  */
4572                 for (i = 0; i < 100; i++) {
4573                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4574                                 break;
4575                         udelay(1);
4576                 }
4577         }
4578 }
4579
4580 /* tp->lock is held. */
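/* Full shutdown path: pause the ASF firmware, quiesce the MAC and DMA
 * blocks, reset the chip, and record the reset kind for the firmware.
 */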
4581 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4582 {
4583         int err;
4584
4585         tg3_stop_fw(tp);
4586
4587         tg3_write_sig_pre_reset(tp, kind);
4588
4589         tg3_abort_hw(tp, silent);
4590         err = tg3_chip_reset(tp);
4591
4592         tg3_write_sig_legacy(tp, kind);
4593         tg3_write_sig_post_reset(tp, kind);
4594
4595         if (err)
4596                 return err;
4597
4598         return 0;
4599 }
4600
4601 #define TG3_FW_RELEASE_MAJOR    0x0
4602 #define TG3_FW_RELEASE_MINOR    0x0
4603 #define TG3_FW_RELEASE_FIX      0x0
4604 #define TG3_FW_START_ADDR       0x08000000
4605 #define TG3_FW_TEXT_ADDR        0x08000000
4606 #define TG3_FW_TEXT_LEN         0x9c0
4607 #define TG3_FW_RODATA_ADDR      0x080009c0
4608 #define TG3_FW_RODATA_LEN       0x60
4609 #define TG3_FW_DATA_ADDR        0x08000a40
4610 #define TG3_FW_DATA_LEN         0x20
4611 #define TG3_FW_SBSS_ADDR        0x08000a60
4612 #define TG3_FW_SBSS_LEN         0xc
4613 #define TG3_FW_BSS_ADDR         0x08000a70
4614 #define TG3_FW_BSS_LEN          0x10
4615
4616 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4617         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4618         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4619         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4620         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4621         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4622         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4623         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4624         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4625         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4626         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4627         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4628         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4629         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4630         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4631         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4632         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4633         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4634         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4635         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4636         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4637         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4638         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4639         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4640         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4641         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4642         0, 0, 0, 0, 0, 0,
4643         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4644         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4645         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4646         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4647         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4648         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4649         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4650         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4651         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4652         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4653         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4654         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4655         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4656         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4657         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4658         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4659         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4660         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4661         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4662         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4663         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4664         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4665         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4666         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4667         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4668         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4669         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4670         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4671         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4672         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4673         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4674         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4675         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4676         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4677         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4678         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4679         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4680         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4681         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4682         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4683         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4684         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4685         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4686         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4687         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4688         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4689         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4690         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4691         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4692         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4693         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4694         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4695         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4696         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4697         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4698         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4699         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4700         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4701         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4702         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4703         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4704         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4705         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4706         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4707         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4708 };
4709
4710 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4711         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4712         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4713         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4714         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4715         0x00000000
4716 };
4717
4718 #if 0 /* All zeros, don't eat up space with it. */
4719 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4720         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4721         0x00000000, 0x00000000, 0x00000000, 0x00000000
4722 };
4723 #endif
4724
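/* On-chip scratch memory regions (16 KB each) into which the RX and TX
 * CPU firmware images are loaded.
 */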
4725 #define RX_CPU_SCRATCH_BASE     0x30000
4726 #define RX_CPU_SCRATCH_SIZE     0x04000
4727 #define TX_CPU_SCRATCH_BASE     0x34000
4728 #define TX_CPU_SCRATCH_SIZE     0x04000
4729
4730 /* tp->lock is held. */
4731 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4732 {
4733         int i;
4734
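        /* 5705 and newer chips have no separate TX CPU, so trying to halt
         * one is a driver bug.
         */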
4735         if (offset == TX_CPU_BASE &&
4736             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4737                 BUG();
4738
4739         if (offset == RX_CPU_BASE) {
4740                 for (i = 0; i < 10000; i++) {
4741                         tw32(offset + CPU_STATE, 0xffffffff);
4742                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4743                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4744                                 break;
4745                 }
4746
4747                 tw32(offset + CPU_STATE, 0xffffffff);
4748                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4749                 udelay(10);
4750         } else {
4751                 for (i = 0; i < 10000; i++) {
4752                         tw32(offset + CPU_STATE, 0xffffffff);
4753                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4754                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4755                                 break;
4756                 }
4757         }
4758
4759         if (i >= 10000) {
4760                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s "
4761                        "(%s CPU)\n",
4762                        tp->dev->name,
4763                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4764                 return -ENODEV;
4765         }
4766
4767         /* Clear firmware's nvram arbitration. */
4768         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4769                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
4770         return 0;
4771 }
4772
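/* Describes a firmware image to load: the linked address and length of its
 * .text, .rodata and .data sections.  A NULL *_data pointer means the
 * section contents are all zeros.
 */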
4773 struct fw_info {
4774         unsigned int text_base;
4775         unsigned int text_len;
4776         u32 *text_data;
4777         unsigned int rodata_base;
4778         unsigned int rodata_len;
4779         u32 *rodata_data;
4780         unsigned int data_base;
4781         unsigned int data_len;
4782         u32 *data_data;
4783 };
4784
4785 /* tp->lock is held. */
4786 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4787                                  int cpu_scratch_size, struct fw_info *info)
4788 {
4789         int err, lock_err, i;
4790         void (*write_op)(struct tg3 *, u32, u32);
4791
4792         if (cpu_base == TX_CPU_BASE &&
4793             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4794                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4795                        "TX cpu firmware on %s, which is a 5705-class chip.\n",
4796                        tp->dev->name);
4797                 return -EINVAL;
4798         }
4799
4800         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4801                 write_op = tg3_write_mem;
4802         else
4803                 write_op = tg3_write_indirect_reg32;
4804
4805         /* It is possible that bootcode is still loading at this point.
4806          * Get the nvram lock first before halting the cpu.
4807          */
4808         lock_err = tg3_nvram_lock(tp);
4809         err = tg3_halt_cpu(tp, cpu_base);
4810         if (!lock_err)
4811                 tg3_nvram_unlock(tp);
4812         if (err)
4813                 goto out;
4814
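        /* Zero the CPU scratch memory, hold the CPU in halt, then copy each
         * section into scratch at the offset given by the low 16 bits of its
         * linked address (a NULL section pointer writes zeros).
         */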
4815         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4816                 write_op(tp, cpu_scratch_base + i, 0);
4817         tw32(cpu_base + CPU_STATE, 0xffffffff);
4818         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
4819         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4820                 write_op(tp, (cpu_scratch_base +
4821                               (info->text_base & 0xffff) +
4822                               (i * sizeof(u32))),
4823                          (info->text_data ?
4824                           info->text_data[i] : 0));
4825         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4826                 write_op(tp, (cpu_scratch_base +
4827                               (info->rodata_base & 0xffff) +
4828                               (i * sizeof(u32))),
4829                          (info->rodata_data ?
4830                           info->rodata_data[i] : 0));
4831         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4832                 write_op(tp, (cpu_scratch_base +
4833                               (info->data_base & 0xffff) +
4834                               (i * sizeof(u32))),
4835                          (info->data_data ?
4836                           info->data_data[i] : 0));
4837
4838         err = 0;
4839
4840 out:
4841         return err;
4842 }
4843
4844 /* tp->lock is held. */
4845 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4846 {
4847         struct fw_info info;
4848         int err, i;
4849
4850         info.text_base = TG3_FW_TEXT_ADDR;
4851         info.text_len = TG3_FW_TEXT_LEN;
4852         info.text_data = &tg3FwText[0];
4853         info.rodata_base = TG3_FW_RODATA_ADDR;
4854         info.rodata_len = TG3_FW_RODATA_LEN;
4855         info.rodata_data = &tg3FwRodata[0];
4856         info.data_base = TG3_FW_DATA_ADDR;
4857         info.data_len = TG3_FW_DATA_LEN;
4858         info.data_data = NULL;
4859
4860         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4861                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4862                                     &info);
4863         if (err)
4864                 return err;
4865
4866         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4867                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4868                                     &info);
4869         if (err)
4870                 return err;
4871
4872         /* Now startup only the RX cpu. */
4873         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4874         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4875
4876         for (i = 0; i < 5; i++) {
4877                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4878                         break;
4879                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4880                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4881                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4882                 udelay(1000);
4883         }
4884         if (i >= 5) {
4885                 printk(KERN_ERR PFX "tg3_load_firmware failed for %s "
4886                        "to set RX CPU PC: is %08x, should be %08x\n",
4887                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4888                        TG3_FW_TEXT_ADDR);
4889                 return -ENODEV;
4890         }
4891         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4892         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4893
4894         return 0;
4895 }
4896
4897 #if TG3_TSO_SUPPORT != 0
4898
4899 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4900 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4901 #define TG3_TSO_FW_RELEASE_FIX          0x0
4902 #define TG3_TSO_FW_START_ADDR           0x08000000
4903 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4904 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4905 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4906 #define TG3_TSO_FW_RODATA_LEN           0x60
4907 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4908 #define TG3_TSO_FW_DATA_LEN             0x30
4909 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4910 #define TG3_TSO_FW_SBSS_LEN             0x2c
4911 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4912 #define TG3_TSO_FW_BSS_LEN              0x894
4913
4914 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4915         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4916         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4917         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4918         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4919         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4920         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4921         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4922         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4923         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4924         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4925         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4926         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4927         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4928         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4929         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4930         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4931         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4932         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4933         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4934         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4935         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4936         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4937         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4938         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4939         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4940         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4941         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4942         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4943         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4944         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4945         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4946         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4947         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4948         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4949         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4950         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4951         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4952         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4953         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4954         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4955         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4956         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4957         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4958         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4959         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4960         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4961         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4962         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4963         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4964         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4965         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4966         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4967         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4968         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4969         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4970         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4971         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4972         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4973         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4974         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4975         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4976         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4977         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4978         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4979         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4980         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4981         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4982         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4983         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4984         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4985         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4986         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4987         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4988         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4989         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4990         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4991         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4992         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4993         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4994         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4995         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4996         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4997         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4998         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4999         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5000         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5001         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5002         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5003         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5004         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5005         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5006         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5007         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5008         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5009         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5010         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5011         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5012         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5013         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5014         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5015         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5016         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5017         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5018         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5019         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5020         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5021         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5022         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5023         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5024         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5025         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5026         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5027         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5028         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5029         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5030         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5031         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5032         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5033         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5034         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5035         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5036         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5037         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5038         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5039         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5040         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5041         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5042         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5043         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5044         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5045         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5046         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5047         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5048         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5049         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5050         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5051         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5052         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5053         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5054         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5055         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5056         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5057         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5058         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5059         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5060         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5061         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5062         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5063         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5064         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5065         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5066         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5067         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5068         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5069         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5070         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5071         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5072         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5073         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5074         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5075         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5076         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5077         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5078         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5079         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5080         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5081         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5082         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5083         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5084         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5085         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5086         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5087         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5088         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5089         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5090         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5091         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5092         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5093         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5094         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5095         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5096         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5097         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5098         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5099         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5100         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5101         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5102         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5103         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5104         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5105         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5106         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5107         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5108         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5109         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5110         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5111         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5112         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5113         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5114         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5115         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5116         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5117         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5118         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5119         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5120         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5121         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5122         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5123         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5124         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5125         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5126         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5127         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5128         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5129         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5130         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5131         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5132         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5133         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5134         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5135         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5136         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5137         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5138         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5139         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5140         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5141         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5142         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5143         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5144         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5145         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5146         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5147         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5148         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5149         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5150         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5151         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5152         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5153         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5154         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5155         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5156         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5157         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5158         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5159         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5160         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5161         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5162         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5163         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5164         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5165         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5166         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5167         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5168         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5169         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5170         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5171         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5172         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5173         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5174         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5175         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5176         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5177         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5178         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5179         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5180         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5181         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5182         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5183         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5184         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5185         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5186         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5187         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5188         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5189         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5190         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5191         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5192         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5193         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5194         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5195         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5196         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5197         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5198         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5199 };
5200
5201 static u32 tg3TsoFwRodata[] = {
5202         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5203         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5204         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5205         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5206         0x00000000,
5207 };
5208
5209 static u32 tg3TsoFwData[] = {
5210         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5211         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5212         0x00000000,
5213 };
5214
5215 /* 5705 needs a special version of the TSO firmware.  */
5216 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5217 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
5218 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5219 #define TG3_TSO5_FW_START_ADDR          0x00010000
5220 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5221 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5222 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5223 #define TG3_TSO5_FW_RODATA_LEN          0x50
5224 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5225 #define TG3_TSO5_FW_DATA_LEN            0x20
5226 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5227 #define TG3_TSO5_FW_SBSS_LEN            0x28
5228 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5229 #define TG3_TSO5_FW_BSS_LEN             0x88
5230
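     /* The 5705 firmware image is laid out as contiguous text, rodata, data,
      * sbss and bss sections starting at 0x00010000 (with a little alignment
      * padding between sections).  tg3_load_tso_firmware() copies it into the
      * MBUF pool SRAM, which doubles as the RX CPU scratch space on this chip.
      */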
5231 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5232         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5233         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5234         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5235         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5236         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5237         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5238         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5239         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5240         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5241         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5242         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5243         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5244         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5245         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5246         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5247         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5248         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5249         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5250         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5251         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5252         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5253         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5254         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5255         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5256         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5257         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5258         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5259         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5260         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5261         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5262         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5263         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5264         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5265         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5266         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5267         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5268         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5269         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5270         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5271         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5272         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5273         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5274         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5275         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5276         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5277         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5278         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5279         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5280         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5281         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5282         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5283         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5284         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5285         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5286         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5287         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5288         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5289         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5290         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5291         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5292         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5293         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5294         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5295         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5296         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5297         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5298         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5299         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5300         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5301         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5302         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5303         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5304         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5305         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5306         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5307         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5308         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5309         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5310         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5311         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5312         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5313         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5314         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5315         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5316         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5317         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5318         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5319         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5320         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5321         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5322         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5323         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5324         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5325         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5326         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5327         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5328         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5329         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5330         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5331         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5332         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5333         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5334         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5335         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5336         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5337         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5338         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5339         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5340         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5341         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5342         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5343         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5344         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5345         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5346         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5347         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5348         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5349         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5350         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5351         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5352         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5353         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5354         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5355         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5356         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5357         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5358         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5359         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5360         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5361         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5362         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5363         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5364         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5365         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5366         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5367         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5368         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5369         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5370         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5371         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5372         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5373         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5374         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5375         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5376         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5377         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5378         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5379         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5380         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5381         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5382         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5383         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5384         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5385         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5386         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5387         0x00000000, 0x00000000, 0x00000000,
5388 };
5389
5390 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5391         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5392         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5393         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5394         0x00000000, 0x00000000, 0x00000000,
5395 };
5396
5397 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5398         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5399         0x00000000, 0x00000000, 0x00000000,
5400 };
5401
5402 /* tp->lock is held. */
5403 static int tg3_load_tso_firmware(struct tg3 *tp)
5404 {
5405         struct fw_info info;
5406         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5407         int err, i;
5408
5409         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5410                 return 0;
5411
5412         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5413                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5414                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5415                 info.text_data = &tg3Tso5FwText[0];
5416                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5417                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5418                 info.rodata_data = &tg3Tso5FwRodata[0];
5419                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5420                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5421                 info.data_data = &tg3Tso5FwData[0];
5422                 cpu_base = RX_CPU_BASE;
5423                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5424                 cpu_scratch_size = (info.text_len +
5425                                     info.rodata_len +
5426                                     info.data_len +
5427                                     TG3_TSO5_FW_SBSS_LEN +
5428                                     TG3_TSO5_FW_BSS_LEN);
5429         } else {
5430                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5431                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5432                 info.text_data = &tg3TsoFwText[0];
5433                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5434                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5435                 info.rodata_data = &tg3TsoFwRodata[0];
5436                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5437                 info.data_len = TG3_TSO_FW_DATA_LEN;
5438                 info.data_data = &tg3TsoFwData[0];
5439                 cpu_base = TX_CPU_BASE;
5440                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5441                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5442         }
5443
5444         err = tg3_load_firmware_cpu(tp, cpu_base,
5445                                     cpu_scratch_base, cpu_scratch_size,
5446                                     &info);
5447         if (err)
5448                 return err;
5449
5450         /* Now start up the CPU. */
5451         tw32(cpu_base + CPU_STATE, 0xffffffff);
5452         tw32_f(cpu_base + CPU_PC,    info.text_base);
5453
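             /* Verify the PC latched the entry point; if not, halt the CPU,
              * rewrite it and retry up to five times before giving up.
              */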
5454         for (i = 0; i < 5; i++) {
5455                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5456                         break;
5457                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5458                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5459                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5460                 udelay(1000);
5461         }
5462         if (i >= 5) {
5463                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5464                        "to set CPU PC, is %08x should be %08x\n",
5465                        tp->dev->name, tr32(cpu_base + CPU_PC),
5466                        info.text_base);
5467                 return -ENODEV;
5468         }
5469         tw32(cpu_base + CPU_STATE, 0xffffffff);
5470         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5471         return 0;
5472 }
5473
5474 #endif /* TG3_TSO_SUPPORT != 0 */
5475
5476 /* tp->lock is held. */
5477 static void __tg3_set_mac_addr(struct tg3 *tp)
5478 {
5479         u32 addr_high, addr_low;
5480         int i;
5481
5482         addr_high = ((tp->dev->dev_addr[0] << 8) |
5483                      tp->dev->dev_addr[1]);
5484         addr_low = ((tp->dev->dev_addr[2] << 24) |
5485                     (tp->dev->dev_addr[3] << 16) |
5486                     (tp->dev->dev_addr[4] <<  8) |
5487                     (tp->dev->dev_addr[5] <<  0));
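             /* Program all four station address slots (MAC_ADDR_0..3) with
              * the same address.
              */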
5488         for (i = 0; i < 4; i++) {
5489                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5490                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5491         }
5492
5493         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5494             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5495                 for (i = 0; i < 12; i++) {
5496                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5497                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5498                 }
5499         }
5500
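             /* Seed the transmit backoff generator from the sum of the MAC
              * address octets.
              */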
5501         addr_high = (tp->dev->dev_addr[0] +
5502                      tp->dev->dev_addr[1] +
5503                      tp->dev->dev_addr[2] +
5504                      tp->dev->dev_addr[3] +
5505                      tp->dev->dev_addr[4] +
5506                      tp->dev->dev_addr[5]) &
5507                 TX_BACKOFF_SEED_MASK;
5508         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5509 }
5510
5511 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5512 {
5513         struct tg3 *tp = netdev_priv(dev);
5514         struct sockaddr *addr = p;
5515
5516         if (!is_valid_ether_addr(addr->sa_data))
5517                 return -EINVAL;
5518
5519         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5520
5521         spin_lock_bh(&tp->lock);
5522         __tg3_set_mac_addr(tp);
5523         spin_unlock_bh(&tp->lock);
5524
5525         return 0;
5526 }
5527
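     /* Write one TG3_BDINFO block into NIC SRAM: the host ring DMA address
      * (high/low), the maxlen/flags word and, on pre-5705 chips, the ring's
      * address in NIC-local memory.
      */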
5528 /* tp->lock is held. */
5529 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5530                            dma_addr_t mapping, u32 maxlen_flags,
5531                            u32 nic_addr)
5532 {
5533         tg3_write_mem(tp,
5534                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5535                       ((u64) mapping >> 32));
5536         tg3_write_mem(tp,
5537                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5538                       ((u64) mapping & 0xffffffff));
5539         tg3_write_mem(tp,
5540                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5541                        maxlen_flags);
5542
5543         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5544                 tg3_write_mem(tp,
5545                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5546                               nic_addr);
5547 }
5548
5549 static void __tg3_set_rx_mode(struct net_device *);
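     /* Push the ethtool coalescing parameters into the host coalescing engine.
      * The IRQ-time tick registers and the statistics block tick are only
      * programmed on pre-5705 chips, and the statistics tick is forced to zero
      * while the link is down.
      */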
5550 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5551 {
5552         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5553         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5554         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5555         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5556         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5557                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5558                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5559         }
5560         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5561         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5562         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5563                 u32 val = ec->stats_block_coalesce_usecs;
5564
5565                 if (!netif_carrier_ok(tp->dev))
5566                         val = 0;
5567
5568                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5569         }
5570 }
5571
5572 /* tp->lock is held. */
5573 static int tg3_reset_hw(struct tg3 *tp)
5574 {
5575         u32 val, rdmac_mode;
5576         int i, err, limit;
5577
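             /* Quiesce the chip before resetting it: mask interrupts, stop any
              * running firmware, post the pre-reset signature and, if the NIC
              * had already been initialized, abort in-flight activity.
              */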
5578         tg3_disable_ints(tp);
5579
5580         tg3_stop_fw(tp);
5581
5582         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5583
5584         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5585                 tg3_abort_hw(tp, 1);
5586         }
5587
5588         err = tg3_chip_reset(tp);
5589         if (err)
5590                 return err;
5591
5592         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5593
5594         /* This works around an issue with Athlon chipsets on
5595          * B3 tigon3 silicon.  This bit has no effect on any
5596          * other revision.  But do not set this on PCI Express
5597          * chips.
5598          */
5599         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5600                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5601         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5602
5603         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5604             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5605                 val = tr32(TG3PCI_PCISTATE);
5606                 val |= PCISTATE_RETRY_SAME_DMA;
5607                 tw32(TG3PCI_PCISTATE, val);
5608         }
5609
5610         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5611                 /* Enable some hw fixes.  */
5612                 val = tr32(TG3PCI_MSI_DATA);
5613                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5614                 tw32(TG3PCI_MSI_DATA, val);
5615         }
5616
5617         /* Descriptor ring init may make accesses to the
5618          * NIC SRAM area to set up the TX descriptors, so we
5619          * can only do this after the hardware has been
5620          * successfully reset.
5621          */
5622         tg3_init_rings(tp);
5623
5624         /* This value is determined during the probe time DMA
5625          * engine test, tg3_test_dma.
5626          */
5627         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5628
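             /* Select host-based send BDs and apply the pseudo-header checksum
              * offload preferences in GRC_MODE.
              */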
5629         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5630                           GRC_MODE_4X_NIC_SEND_RINGS |
5631                           GRC_MODE_NO_TX_PHDR_CSUM |
5632                           GRC_MODE_NO_RX_PHDR_CSUM);
5633         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5634         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5635                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5636         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5637                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5638
5639         tw32(GRC_MODE,
5640              tp->grc_mode |
5641              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5642
5643         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
5644         val = tr32(GRC_MISC_CFG);
5645         val &= ~0xff;
5646         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5647         tw32(GRC_MISC_CFG, val);
5648
5649         /* Initialize MBUF/DESC pool. */
5650         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5651                 /* Do nothing.  */
5652         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5653                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5654                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5655                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5656                 else
5657                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5658                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5659                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5660         }
5661 #if TG3_TSO_SUPPORT != 0
5662         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5663                 int fw_len;
5664
5665                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5666                           TG3_TSO5_FW_RODATA_LEN +
5667                           TG3_TSO5_FW_DATA_LEN +
5668                           TG3_TSO5_FW_SBSS_LEN +
5669                           TG3_TSO5_FW_BSS_LEN);
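                     /* Round the firmware footprint up to a 128-byte boundary
                      * and carve that much out of the front of the MBUF pool.
                      */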
5670                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5671                 tw32(BUFMGR_MB_POOL_ADDR,
5672                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5673                 tw32(BUFMGR_MB_POOL_SIZE,
5674                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5675         }
5676 #endif
5677
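             /* Buffer manager MBUF watermarks: use the jumbo thresholds
              * whenever the MTU exceeds a standard Ethernet frame.
              */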
5678         if (tp->dev->mtu <= ETH_DATA_LEN) {
5679                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5680                      tp->bufmgr_config.mbuf_read_dma_low_water);
5681                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5682                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5683                 tw32(BUFMGR_MB_HIGH_WATER,
5684                      tp->bufmgr_config.mbuf_high_water);
5685         } else {
5686                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5687                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5688                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5689                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5690                 tw32(BUFMGR_MB_HIGH_WATER,
5691                      tp->bufmgr_config.mbuf_high_water_jumbo);
5692         }
5693         tw32(BUFMGR_DMA_LOW_WATER,
5694              tp->bufmgr_config.dma_low_water);
5695         tw32(BUFMGR_DMA_HIGH_WATER,
5696              tp->bufmgr_config.dma_high_water);
5697
5698         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5699         for (i = 0; i < 2000; i++) {
5700                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5701                         break;
5702                 udelay(10);
5703         }
5704         if (i >= 2000) {
5705                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5706                        tp->dev->name);
5707                 return -ENODEV;
5708         }
5709
5710         /* Setup replenish threshold. */
5711         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5712
5713         /* Initialize TG3_BDINFO's at:
5714          *  RCVDBDI_STD_BD:     standard eth size rx ring
5715          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5716          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5717          *
5718          * like so:
5719          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5720          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5721          *                              ring attribute flags
5722          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5723          *
5724          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5725          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5726          *
5727          * The size of each ring is fixed in the firmware, but the location is
5728          * configurable.
5729          */
5730         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5731              ((u64) tp->rx_std_mapping >> 32));
5732         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5733              ((u64) tp->rx_std_mapping & 0xffffffff));
5734         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5735              NIC_SRAM_RX_BUFFER_DESC);
5736
5737         /* Don't even try to program the JUMBO/MINI buffer descriptor
5738          * configs on 5705.
5739          */
5740         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5741                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5742                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5743         } else {
5744                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5745                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5746
5747                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5748                      BDINFO_FLAGS_DISABLED);
5749
5750                 /* Setup replenish threshold. */
5751                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5752
5753                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5754                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5755                              ((u64) tp->rx_jumbo_mapping >> 32));
5756                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5757                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5758                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5759                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5760                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5761                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5762                 } else {
5763                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5764                              BDINFO_FLAGS_DISABLED);
5765                 }
5766
5767         }
5768
5769         /* There is only one send ring on 5705/5750, no need to explicitly
5770          * disable the others.
5771          */
5772         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5773                 /* Clear out send RCB ring in SRAM. */
5774                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5775                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5776                                       BDINFO_FLAGS_DISABLED);
5777         }
5778
5779         tp->tx_prod = 0;
5780         tp->tx_cons = 0;
5781         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5782         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5783
5784         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5785                        tp->tx_desc_mapping,
5786                        (TG3_TX_RING_SIZE <<
5787                         BDINFO_FLAGS_MAXLEN_SHIFT),
5788                        NIC_SRAM_TX_BUFFER_DESC);
5789
5790         /* There is only one receive return ring on 5705/5750, no need
5791          * to explicitly disable the others.
5792          */
5793         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5794                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5795                      i += TG3_BDINFO_SIZE) {
5796                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5797                                       BDINFO_FLAGS_DISABLED);
5798                 }
5799         }
5800
5801         tp->rx_rcb_ptr = 0;
5802         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5803
5804         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5805                        tp->rx_rcb_mapping,
5806                        (TG3_RX_RCB_RING_SIZE(tp) <<
5807                         BDINFO_FLAGS_MAXLEN_SHIFT),
5808                        0);
5809
5810         tp->rx_std_ptr = tp->rx_pending;
5811         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5812                      tp->rx_std_ptr);
5813
5814         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
5815                                                 tp->rx_jumbo_pending : 0;
5816         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5817                      tp->rx_jumbo_ptr);
5818
5819         /* Initialize MAC address and backoff seed. */
5820         __tg3_set_mac_addr(tp);
5821
5822         /* MTU + ethernet header (ETH_HLEN) + 4 byte FCS + 4 byte optional VLAN tag */
5823         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5824
5825         /* The slot time is changed by tg3_setup_phy if we
5826          * run at gigabit with half duplex.
5827          */
5828         tw32(MAC_TX_LENGTHS,
5829              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5830              (6 << TX_LENGTHS_IPG_SHIFT) |
5831              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5832
5833         /* Receive rules. */
5834         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5835         tw32(RCVLPC_CONFIG, 0x0181);
5836
5837         /* Calculate the RDMAC_MODE setting early; we need it to determine
5838          * the RCVLPC_STATS_ENABLE mask.
5839          */
5840         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5841                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5842                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5843                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5844                       RDMAC_MODE_LNGREAD_ENAB);
5845         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5846                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5847
5848         /* If statement applies to 5705 and 5750 PCI devices only */
5849         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5850              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5851             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5852                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5853                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5854                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5855                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5856                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5857                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5858                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5859                 }
5860         }
5861
5862         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5863                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5864
5865 #if TG3_TSO_SUPPORT != 0
5866         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5867                 rdmac_mode |= (1 << 27);
5868 #endif
5869
5870         /* Receive/send statistics. */
5871         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5872             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5873                 val = tr32(RCVLPC_STATS_ENABLE);
5874                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5875                 tw32(RCVLPC_STATS_ENABLE, val);
5876         } else {
5877                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5878         }
5879         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5880         tw32(SNDDATAI_STATSENAB, 0xffffff);
5881         tw32(SNDDATAI_STATSCTRL,
5882              (SNDDATAI_SCTRL_ENABLE |
5883               SNDDATAI_SCTRL_FASTUPD));
5884
5885         /* Setup host coalescing engine. */
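             /* Disable it first and wait for the enable bit to clear before
              * loading the new coalescing parameters.
              */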
5886         tw32(HOSTCC_MODE, 0);
5887         for (i = 0; i < 2000; i++) {
5888                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5889                         break;
5890                 udelay(10);
5891         }
5892
5893         __tg3_set_coalesce(tp, &tp->coal);
5894
5895         /* set status block DMA address */
5896         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5897              ((u64) tp->status_mapping >> 32));
5898         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5899              ((u64) tp->status_mapping & 0xffffffff));
5900
5901         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5902                 /* Status/statistics block address.  See tg3_timer,
5903                  * the tg3_periodic_fetch_stats call there, and
5904                  * tg3_get_stats to see how this works for 5705/5750 chips.
5905                  */
5906                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5907                      ((u64) tp->stats_mapping >> 32));
5908                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5909                      ((u64) tp->stats_mapping & 0xffffffff));
5910                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5911                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5912         }
5913
5914         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5915
5916         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5917         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5918         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5919                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5920
5921         /* Clear statistics/status block in chip, and status block in ram. */
5922         for (i = NIC_SRAM_STATS_BLK;
5923              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5924              i += sizeof(u32)) {
5925                 tg3_write_mem(tp, i, 0);
5926                 udelay(40);
5927         }
5928         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5929
5930         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5931                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
5932                 /* reset to prevent losing 1st rx packet intermittently */
5933                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5934                 udelay(10);
5935         }
5936
5937         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5938                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5939         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5940         udelay(40);
5941
5942         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5943          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5944          * register to preserve the GPIO settings for LOMs. The GPIOs,
5945          * whether used as inputs or outputs, are set by boot code after
5946          * reset.
5947          */
5948         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5949                 u32 gpio_mask;
5950
5951                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5952                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5953
5954                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5955                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5956                                      GRC_LCLCTRL_GPIO_OUTPUT3;
5957
5958                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5959
5960                 /* GPIO1 must be driven high for eeprom write protect */
5961                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5962                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5963         }
5964         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5965         udelay(100);
5966
5967         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5968         tp->last_tag = 0;
5969
5970         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5971                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5972                 udelay(40);
5973         }
5974
5975         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5976                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5977                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5978                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5979                WDMAC_MODE_LNGREAD_ENAB);
5980
5981         /* If statement applies to 5705 and 5750 PCI devices only */
5982         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5983              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5984             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5985                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5986                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5987                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5988                         /* nothing */
5989                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5990                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5991                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5992                         val |= WDMAC_MODE_RX_ACCEL;
5993                 }
5994         }
5995
5996         tw32_f(WDMAC_MODE, val);
5997         udelay(40);
5998
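             /* In PCI-X mode, program the maximum memory read burst size, and
              * on the 5704 also set the split transaction limit when split
              * mode is enabled.
              */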
5999         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6000                 val = tr32(TG3PCI_X_CAPS);
6001                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6002                         val &= ~PCIX_CAPS_BURST_MASK;
6003                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6004                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6005                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6006                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6007                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6008                                 val |= (tp->split_mode_max_reqs <<
6009                                         PCIX_CAPS_SPLIT_SHIFT);
6010                 }
6011                 tw32(TG3PCI_X_CAPS, val);
6012         }
6013
6014         tw32_f(RDMAC_MODE, rdmac_mode);
6015         udelay(40);
6016
6017         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6018         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6019                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6020         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6021         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6022         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6023         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6024         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6025 #if TG3_TSO_SUPPORT != 0
6026         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6027                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6028 #endif
6029         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6030         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6031
6032         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6033                 err = tg3_load_5701_a0_firmware_fix(tp);
6034                 if (err)
6035                         return err;
6036         }
6037
6038 #if TG3_TSO_SUPPORT != 0
6039         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6040                 err = tg3_load_tso_firmware(tp);
6041                 if (err)
6042                         return err;
6043         }
6044 #endif
6045
6046         tp->tx_mode = TX_MODE_ENABLE;
6047         tw32_f(MAC_TX_MODE, tp->tx_mode);
6048         udelay(100);
6049
6050         tp->rx_mode = RX_MODE_ENABLE;
6051         tw32_f(MAC_RX_MODE, tp->rx_mode);
6052         udelay(10);
6053
6054         if (tp->link_config.phy_is_low_power) {
6055                 tp->link_config.phy_is_low_power = 0;
6056                 tp->link_config.speed = tp->link_config.orig_speed;
6057                 tp->link_config.duplex = tp->link_config.orig_duplex;
6058                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6059         }
6060
6061         tp->mi_mode = MAC_MI_MODE_BASE;
6062         tw32_f(MAC_MI_MODE, tp->mi_mode);
6063         udelay(80);
6064
6065         tw32(MAC_LED_CTRL, tp->led_ctrl);
6066
6067         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6068         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6069                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6070                 udelay(10);
6071         }
6072         tw32_f(MAC_RX_MODE, tp->rx_mode);
6073         udelay(10);
6074
6075         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6076                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6077                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6078                         /* Set drive transmission level to 1.2V  */
6079                         /* only if the signal pre-emphasis bit is not set  */
6080                         val = tr32(MAC_SERDES_CFG);
6081                         val &= 0xfffff000;
6082                         val |= 0x880;
6083                         tw32(MAC_SERDES_CFG, val);
6084                 }
6085                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6086                         tw32(MAC_SERDES_CFG, 0x616000);
6087         }
6088
6089         /* Prevent chip from dropping frames when flow control
6090          * is enabled.
6091          */
6092         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6093
6094         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6095             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6096                 /* Use hardware link auto-negotiation */
6097                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6098         }
6099
6100         err = tg3_setup_phy(tp, 1);
6101         if (err)
6102                 return err;
6103
6104         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6105                 u32 tmp;
6106
6107                 /* Clear CRC stats. */
6108                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6109                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6110                         tg3_readphy(tp, 0x14, &tmp);
6111                 }
6112         }
6113
6114         __tg3_set_rx_mode(tp->dev);
6115
6116         /* Initialize receive rules. */
6117         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6118         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6119         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6120         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6121
6122         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6123             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6124                 limit = 8;
6125         else
6126                 limit = 16;
6127         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6128                 limit -= 4;
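             /* Intentional fall-through: start at the number of rules this
              * chip supports (less four reserved when ASF is enabled) and
              * clear every unused rule/value pair down to rule 4.  Rules 0
              * and 1 were programmed above; 2 and 3 are deliberately left
              * alone.
              */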
6129         switch (limit) {
6130         case 16:
6131                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6132         case 15:
6133                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6134         case 14:
6135                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6136         case 13:
6137                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6138         case 12:
6139                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6140         case 11:
6141                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6142         case 10:
6143                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6144         case 9:
6145                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6146         case 8:
6147                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6148         case 7:
6149                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6150         case 6:
6151                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6152         case 5:
6153                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6154         case 4:
6155                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6156         case 3:
6157                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6158         case 2:
6159         case 1:
6160
6161         default:
6162                 break;
6163         }
6164
6165         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6166
6167         return 0;
6168 }
6169
6170 /* Called at device open time to get the chip ready for
6171  * packet processing.  Invoked with tp->lock held.
6172  */
6173 static int tg3_init_hw(struct tg3 *tp)
6174 {
6175         int err;
6176
6177         /* Force the chip into D0. */
6178         err = tg3_set_power_state(tp, 0);
6179         if (err)
6180                 goto out;
6181
6182         tg3_switch_clocks(tp);
6183
6184         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6185
6186         err = tg3_reset_hw(tp);
6187
6188 out:
6189         return err;
6190 }
6191
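     /* Fold a 32-bit hardware counter read into a 64-bit (high/low) software
      * counter, carrying into the high word when the low word wraps.
      */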
6192 #define TG3_STAT_ADD32(PSTAT, REG) \
6193 do {    u32 __val = tr32(REG); \
6194         (PSTAT)->low += __val; \
6195         if ((PSTAT)->low < __val) \
6196                 (PSTAT)->high += 1; \
6197 } while (0)
6198
6199 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6200 {
6201         struct tg3_hw_stats *sp = tp->hw_stats;
6202
6203         if (!netif_carrier_ok(tp->dev))
6204                 return;
6205
6206         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6207         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6208         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6209         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6210         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6211         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6212         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6213         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6214         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6215         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6216         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6217         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6218         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6219
6220         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6221         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6222         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6223         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6224         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6225         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6226         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6227         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6228         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6229         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6230         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6231         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6232         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6233         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6234 }
6235
6236 static void tg3_timer(unsigned long __opaque)
6237 {
6238         struct tg3 *tp = (struct tg3 *) __opaque;
6239
6240         spin_lock(&tp->lock);
6241
6242         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6243                 /* All of this garbage is needed because, when using
6244                  * non-tagged IRQ status, the mailbox/status_block protocol
6245                  * the chip uses with the CPU is race prone.
6246                  */
6247                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6248                         tw32(GRC_LOCAL_CTRL,
6249                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6250                 } else {
6251                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6252                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6253                 }
6254
6255                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6256                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6257                         spin_unlock(&tp->lock);
6258                         schedule_work(&tp->reset_task);
6259                         return;
6260                 }
6261         }
6262
6263         /* This part only runs once per second. */
6264         if (!--tp->timer_counter) {
6265                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6266                         tg3_periodic_fetch_stats(tp);
6267
6268                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6269                         u32 mac_stat;
6270                         int phy_event;
6271
6272                         mac_stat = tr32(MAC_STATUS);
6273
6274                         phy_event = 0;
6275                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6276                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6277                                         phy_event = 1;
6278                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6279                                 phy_event = 1;
6280
6281                         if (phy_event)
6282                                 tg3_setup_phy(tp, 0);
6283                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6284                         u32 mac_stat = tr32(MAC_STATUS);
6285                         int need_setup = 0;
6286
6287                         if (netif_carrier_ok(tp->dev) &&
6288                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6289                                 need_setup = 1;
6290                         }
6291                         if (! netif_carrier_ok(tp->dev) &&
6292                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6293                                          MAC_STATUS_SIGNAL_DET))) {
6294                                 need_setup = 1;
6295                         }
6296                         if (need_setup) {
6297                                 tw32_f(MAC_MODE,
6298                                      (tp->mac_mode &
6299                                       ~MAC_MODE_PORT_MODE_MASK));
6300                                 udelay(40);
6301                                 tw32_f(MAC_MODE, tp->mac_mode);
6302                                 udelay(40);
6303                                 tg3_setup_phy(tp, 0);
6304                         }
6305                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6306                         tg3_serdes_parallel_detect(tp);
6307
6308                 tp->timer_counter = tp->timer_multiplier;
6309         }
6310
6311         /* Heartbeat is only sent once every 2 seconds.  */
6312         if (!--tp->asf_counter) {
6313                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6314                         u32 val;
6315
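                        /* Tell the ASF firmware the driver is still alive:
                         * post the ALIVE2 command with a 4-byte payload and
                         * a 5 second timeout in the firmware mailbox, then
                         * set bit 14 of GRC_RX_CPU_EVENT, which appears to
                         * act as the doorbell that delivers the event to
                         * the RX CPU.
                         */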
6316                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
6317                                            FWCMD_NICDRV_ALIVE2);
6318                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6319                         /* 5 seconds timeout */
6320                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6321                         val = tr32(GRC_RX_CPU_EVENT);
6322                         val |= (1 << 14);
6323                         tw32(GRC_RX_CPU_EVENT, val);
6324                 }
6325                 tp->asf_counter = tp->asf_multiplier;
6326         }
6327
6328         spin_unlock(&tp->lock);
6329
6330         tp->timer.expires = jiffies + tp->timer_offset;
6331         add_timer(&tp->timer);
6332 }
6333
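/* Verify that the chip can actually raise an interrupt with the current
 * IRQ routing: install a minimal test ISR, force a "host coalescing now"
 * event and poll the interrupt mailbox for up to ~50 ms, then restore
 * the normal handler.
 */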
6334 static int tg3_test_interrupt(struct tg3 *tp)
6335 {
6336         struct net_device *dev = tp->dev;
6337         int err, i;
6338         u32 int_mbox = 0;
6339
6340         if (!netif_running(dev))
6341                 return -ENODEV;
6342
6343         tg3_disable_ints(tp);
6344
6345         free_irq(tp->pdev->irq, dev);
6346
6347         err = request_irq(tp->pdev->irq, tg3_test_isr,
6348                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6349         if (err)
6350                 return err;
6351
6352         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6353         tg3_enable_ints(tp);
6354
6355         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6356                HOSTCC_MODE_NOW);
6357
6358         for (i = 0; i < 5; i++) {
6359                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6360                                         TG3_64BIT_REG_LOW);
6361                 if (int_mbox != 0)
6362                         break;
6363                 msleep(10);
6364         }
6365
6366         tg3_disable_ints(tp);
6367
6368         free_irq(tp->pdev->irq, dev);
6369         
6370         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6371                 err = request_irq(tp->pdev->irq, tg3_msi,
6372                                   SA_SAMPLE_RANDOM, dev->name, dev);
6373         else {
6374                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6375                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6376                         fn = tg3_interrupt_tagged;
6377                 err = request_irq(tp->pdev->irq, fn,
6378                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6379         }
6380
6381         if (err)
6382                 return err;
6383
6384         if (int_mbox != 0)
6385                 return 0;
6386
6387         return -EIO;
6388 }
6389
6390 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
6391  * INTx mode is successfully restored.
6392  */
6393 static int tg3_test_msi(struct tg3 *tp)
6394 {
6395         struct net_device *dev = tp->dev;
6396         int err;
6397         u16 pci_cmd;
6398
6399         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6400                 return 0;
6401
6402         /* Turn off SERR reporting in case MSI terminates with Master
6403          * Abort.
6404          */
6405         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6406         pci_write_config_word(tp->pdev, PCI_COMMAND,
6407                               pci_cmd & ~PCI_COMMAND_SERR);
6408
6409         err = tg3_test_interrupt(tp);
6410
6411         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6412
6413         if (!err)
6414                 return 0;
6415
6416         /* other failures */
6417         if (err != -EIO)
6418                 return err;
6419
6420         /* MSI test failed, go back to INTx mode */
6421         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6422                "switching to INTx mode. Please report this failure to "
6423                "the PCI maintainer and include system chipset information.\n",
6424                        tp->dev->name);
6425
6426         free_irq(tp->pdev->irq, dev);
6427         pci_disable_msi(tp->pdev);
6428
6429         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6430
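        /* Fall back to the legacy INTx handler, using the tagged-status
         * variant when the chip supports it.
         */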
6431         {
6432                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6433                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6434                         fn = tg3_interrupt_tagged;
6435
6436                 err = request_irq(tp->pdev->irq, fn,
6437                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6438         }
6439         if (err)
6440                 return err;
6441
6442         /* Need to reset the chip because the MSI cycle may have terminated
6443          * with Master Abort.
6444          */
6445         tg3_full_lock(tp, 1);
6446
6447         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6448         err = tg3_init_hw(tp);
6449
6450         tg3_full_unlock(tp);
6451
6452         if (err)
6453                 free_irq(tp->pdev->irq, dev);
6454
6455         return err;
6456 }
6457
6458 static int tg3_open(struct net_device *dev)
6459 {
6460         struct tg3 *tp = netdev_priv(dev);
6461         int err;
6462
6463         tg3_full_lock(tp, 0);
6464
6465         tg3_disable_ints(tp);
6466         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6467
6468         tg3_full_unlock(tp);
6469
6470         /* The placement of this call is tied
6471          * to the setup and use of Host TX descriptors.
6472          */
6473         err = tg3_alloc_consistent(tp);
6474         if (err)
6475                 return err;
6476
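        /* Only attempt MSI on 5750-class and newer chips, and skip the
         * 5750 A and B steppings; MSI also requires working tagged
         * status, which is sanity-checked below.
         */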
6477         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6478             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6479             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
6480                 /* All MSI supporting chips should support tagged
6481                  * status.  Assert that this is the case.
6482                  */
6483                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6484                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6485                                "Not using MSI.\n", tp->dev->name);
6486                 } else if (pci_enable_msi(tp->pdev) == 0) {
6487                         u32 msi_mode;
6488
6489                         msi_mode = tr32(MSGINT_MODE);
6490                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6491                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6492                 }
6493         }
6494         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6495                 err = request_irq(tp->pdev->irq, tg3_msi,
6496                                   SA_SAMPLE_RANDOM, dev->name, dev);
6497         else {
6498                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6499                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6500                         fn = tg3_interrupt_tagged;
6501
6502                 err = request_irq(tp->pdev->irq, fn,
6503                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6504         }
6505
6506         if (err) {
6507                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6508                         pci_disable_msi(tp->pdev);
6509                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6510                 }
6511                 tg3_free_consistent(tp);
6512                 return err;
6513         }
6514
6515         tg3_full_lock(tp, 0);
6516
6517         err = tg3_init_hw(tp);
6518         if (err) {
6519                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6520                 tg3_free_rings(tp);
6521         } else {
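                /* With tagged status a 1 second timer is enough; the
                 * racy non-tagged protocol is polled ten times a second.
                 */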
6522                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6523                         tp->timer_offset = HZ;
6524                 else
6525                         tp->timer_offset = HZ / 10;
6526
6527                 BUG_ON(tp->timer_offset > HZ);
6528                 tp->timer_counter = tp->timer_multiplier =
6529                         (HZ / tp->timer_offset);
6530                 tp->asf_counter = tp->asf_multiplier =
6531                         ((HZ / tp->timer_offset) * 2);
6532
6533                 init_timer(&tp->timer);
6534                 tp->timer.expires = jiffies + tp->timer_offset;
6535                 tp->timer.data = (unsigned long) tp;
6536                 tp->timer.function = tg3_timer;
6537         }
6538
6539         tg3_full_unlock(tp);
6540
6541         if (err) {
6542                 free_irq(tp->pdev->irq, dev);
6543                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6544                         pci_disable_msi(tp->pdev);
6545                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6546                 }
6547                 tg3_free_consistent(tp);
6548                 return err;
6549         }
6550
6551         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6552                 err = tg3_test_msi(tp);
6553
6554                 if (err) {
6555                         tg3_full_lock(tp, 0);
6556
6557                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6558                                 pci_disable_msi(tp->pdev);
6559                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6560                         }
6561                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6562                         tg3_free_rings(tp);
6563                         tg3_free_consistent(tp);
6564
6565                         tg3_full_unlock(tp);
6566
6567                         return err;
6568                 }
6569         }
6570
6571         tg3_full_lock(tp, 0);
6572
6573         add_timer(&tp->timer);
6574         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6575         tg3_enable_ints(tp);
6576
6577         tg3_full_unlock(tp);
6578
6579         netif_start_queue(dev);
6580
6581         return 0;
6582 }
6583
6584 #if 0
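/* Debug helper, compiled out by default: dumps PCI state, the mode and
 * status registers of every major hardware block, the NIC-side
 * descriptor rings and the host status/statistics blocks.
 */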
6585 /*static*/ void tg3_dump_state(struct tg3 *tp)
6586 {
6587         u32 val32, val32_2, val32_3, val32_4, val32_5;
6588         u16 val16;
6589         int i;
6590
6591         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6592         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6593         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6594                val16, val32);
6595
6596         /* MAC block */
6597         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6598                tr32(MAC_MODE), tr32(MAC_STATUS));
6599         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6600                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6601         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6602                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6603         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6604                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6605
6606         /* Send data initiator control block */
6607         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6608                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6609         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6610                tr32(SNDDATAI_STATSCTRL));
6611
6612         /* Send data completion control block */
6613         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6614
6615         /* Send BD ring selector block */
6616         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6617                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6618
6619         /* Send BD initiator control block */
6620         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6621                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6622
6623         /* Send BD completion control block */
6624         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6625
6626         /* Receive list placement control block */
6627         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6628                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6629         printk("       RCVLPC_STATSCTRL[%08x]\n",
6630                tr32(RCVLPC_STATSCTRL));
6631
6632         /* Receive data and receive BD initiator control block */
6633         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6634                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6635
6636         /* Receive data completion control block */
6637         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6638                tr32(RCVDCC_MODE));
6639
6640         /* Receive BD initiator control block */
6641         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6642                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6643
6644         /* Receive BD completion control block */
6645         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6646                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6647
6648         /* Receive list selector control block */
6649         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6650                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6651
6652         /* Mbuf cluster free block */
6653         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6654                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6655
6656         /* Host coalescing control block */
6657         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6658                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6659         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6660                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6661                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6662         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6663                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6664                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6665         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6666                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6667         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6668                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6669
6670         /* Memory arbiter control block */
6671         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6672                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6673
6674         /* Buffer manager control block */
6675         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6676                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6677         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6678                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6679         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6680                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6681                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6682                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6683
6684         /* Read DMA control block */
6685         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6686                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6687
6688         /* Write DMA control block */
6689         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6690                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6691
6692         /* DMA completion block */
6693         printk("DEBUG: DMAC_MODE[%08x]\n",
6694                tr32(DMAC_MODE));
6695
6696         /* GRC block */
6697         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6698                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6699         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6700                tr32(GRC_LOCAL_CTRL));
6701
6702         /* TG3_BDINFOs */
6703         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6704                tr32(RCVDBDI_JUMBO_BD + 0x0),
6705                tr32(RCVDBDI_JUMBO_BD + 0x4),
6706                tr32(RCVDBDI_JUMBO_BD + 0x8),
6707                tr32(RCVDBDI_JUMBO_BD + 0xc));
6708         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6709                tr32(RCVDBDI_STD_BD + 0x0),
6710                tr32(RCVDBDI_STD_BD + 0x4),
6711                tr32(RCVDBDI_STD_BD + 0x8),
6712                tr32(RCVDBDI_STD_BD + 0xc));
6713         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6714                tr32(RCVDBDI_MINI_BD + 0x0),
6715                tr32(RCVDBDI_MINI_BD + 0x4),
6716                tr32(RCVDBDI_MINI_BD + 0x8),
6717                tr32(RCVDBDI_MINI_BD + 0xc));
6718
6719         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6720         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6721         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6722         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6723         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6724                val32, val32_2, val32_3, val32_4);
6725
6726         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6727         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6728         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6729         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6730         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6731                val32, val32_2, val32_3, val32_4);
6732
6733         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6734         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6735         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6736         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6737         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6738         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6739                val32, val32_2, val32_3, val32_4, val32_5);
6740
6741         /* SW status block */
6742         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6743                tp->hw_status->status,
6744                tp->hw_status->status_tag,
6745                tp->hw_status->rx_jumbo_consumer,
6746                tp->hw_status->rx_consumer,
6747                tp->hw_status->rx_mini_consumer,
6748                tp->hw_status->idx[0].rx_producer,
6749                tp->hw_status->idx[0].tx_consumer);
6750
6751         /* SW statistics block */
6752         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6753                ((u32 *)tp->hw_stats)[0],
6754                ((u32 *)tp->hw_stats)[1],
6755                ((u32 *)tp->hw_stats)[2],
6756                ((u32 *)tp->hw_stats)[3]);
6757
6758         /* Mailboxes */
6759         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6760                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6761                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6762                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6763                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6764
6765         /* NIC side send descriptors. */
6766         for (i = 0; i < 6; i++) {
6767                 unsigned long txd;
6768
6769                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6770                         + (i * sizeof(struct tg3_tx_buffer_desc));
6771                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6772                        i,
6773                        readl(txd + 0x0), readl(txd + 0x4),
6774                        readl(txd + 0x8), readl(txd + 0xc));
6775         }
6776
6777         /* NIC side RX descriptors. */
6778         for (i = 0; i < 6; i++) {
6779                 unsigned long rxd;
6780
6781                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6782                         + (i * sizeof(struct tg3_rx_buffer_desc));
6783                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6784                        i,
6785                        readl(rxd + 0x0), readl(rxd + 0x4),
6786                        readl(rxd + 0x8), readl(rxd + 0xc));
6787                 rxd += (4 * sizeof(u32));
6788                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6789                        i,
6790                        readl(rxd + 0x0), readl(rxd + 0x4),
6791                        readl(rxd + 0x8), readl(rxd + 0xc));
6792         }
6793
6794         for (i = 0; i < 6; i++) {
6795                 unsigned long rxd;
6796
6797                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6798                         + (i * sizeof(struct tg3_rx_buffer_desc));
6799                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6800                        i,
6801                        readl(rxd + 0x0), readl(rxd + 0x4),
6802                        readl(rxd + 0x8), readl(rxd + 0xc));
6803                 rxd += (4 * sizeof(u32));
6804                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6805                        i,
6806                        readl(rxd + 0x0), readl(rxd + 0x4),
6807                        readl(rxd + 0x8), readl(rxd + 0xc));
6808         }
6809 }
6810 #endif
6811
6812 static struct net_device_stats *tg3_get_stats(struct net_device *);
6813 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6814
6815 static int tg3_close(struct net_device *dev)
6816 {
6817         struct tg3 *tp = netdev_priv(dev);
6818
6819         /* Calling flush_scheduled_work() may deadlock because
6820          * linkwatch_event() may be on the workqueue and it will try to get
6821          * the rtnl_lock which we are holding.
6822          */
6823         while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
6824                 msleep(1);
6825
6826         netif_stop_queue(dev);
6827
6828         del_timer_sync(&tp->timer);
6829
6830         tg3_full_lock(tp, 1);
6831 #if 0
6832         tg3_dump_state(tp);
6833 #endif
6834
6835         tg3_disable_ints(tp);
6836
6837         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6838         tg3_free_rings(tp);
6839         tp->tg3_flags &=
6840                 ~(TG3_FLAG_INIT_COMPLETE |
6841                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6842         netif_carrier_off(tp->dev);
6843
6844         tg3_full_unlock(tp);
6845
6846         free_irq(tp->pdev->irq, dev);
6847         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6848                 pci_disable_msi(tp->pdev);
6849                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6850         }
6851
6852         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6853                sizeof(tp->net_stats_prev));
6854         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6855                sizeof(tp->estats_prev));
6856
6857         tg3_free_consistent(tp);
6858
6859         return 0;
6860 }
6861
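/* Hardware statistics counters are 64 bits wide; on 32-bit hosts only
 * the low word fits into an unsigned long.
 */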
6862 static inline unsigned long get_stat64(tg3_stat64_t *val)
6863 {
6864         unsigned long ret;
6865
6866 #if (BITS_PER_LONG == 32)
6867         ret = val->low;
6868 #else
6869         ret = ((u64)val->high << 32) | ((u64)val->low);
6870 #endif
6871         return ret;
6872 }
6873
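/* On 5700/5701 copper parts the receive CRC error count is taken from
 * PHY register 0x14 (after setting bit 15 of register 0x1e) and
 * accumulated in tp->phy_crc_errors; all other parts report it in the
 * MAC statistics block.
 */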
6874 static unsigned long calc_crc_errors(struct tg3 *tp)
6875 {
6876         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6877
6878         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6879             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6880              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6881                 u32 val;
6882
6883                 spin_lock_bh(&tp->lock);
6884                 if (!tg3_readphy(tp, 0x1e, &val)) {
6885                         tg3_writephy(tp, 0x1e, val | 0x8000);
6886                         tg3_readphy(tp, 0x14, &val);
6887                 } else
6888                         val = 0;
6889                 spin_unlock_bh(&tp->lock);
6890
6891                 tp->phy_crc_errors += val;
6892
6893                 return tp->phy_crc_errors;
6894         }
6895
6896         return get_stat64(&hw_stats->rx_fcs_errors);
6897 }
6898
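/* ethtool statistics are cumulative across down/up cycles: each hardware
 * counter is added to the snapshot saved in tp->estats_prev when the
 * device was last closed.
 */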
6899 #define ESTAT_ADD(member) \
6900         estats->member =        old_estats->member + \
6901                                 get_stat64(&hw_stats->member)
6902
6903 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6904 {
6905         struct tg3_ethtool_stats *estats = &tp->estats;
6906         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6907         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6908
6909         if (!hw_stats)
6910                 return old_estats;
6911
6912         ESTAT_ADD(rx_octets);
6913         ESTAT_ADD(rx_fragments);
6914         ESTAT_ADD(rx_ucast_packets);
6915         ESTAT_ADD(rx_mcast_packets);
6916         ESTAT_ADD(rx_bcast_packets);
6917         ESTAT_ADD(rx_fcs_errors);
6918         ESTAT_ADD(rx_align_errors);
6919         ESTAT_ADD(rx_xon_pause_rcvd);
6920         ESTAT_ADD(rx_xoff_pause_rcvd);
6921         ESTAT_ADD(rx_mac_ctrl_rcvd);
6922         ESTAT_ADD(rx_xoff_entered);
6923         ESTAT_ADD(rx_frame_too_long_errors);
6924         ESTAT_ADD(rx_jabbers);
6925         ESTAT_ADD(rx_undersize_packets);
6926         ESTAT_ADD(rx_in_length_errors);
6927         ESTAT_ADD(rx_out_length_errors);
6928         ESTAT_ADD(rx_64_or_less_octet_packets);
6929         ESTAT_ADD(rx_65_to_127_octet_packets);
6930         ESTAT_ADD(rx_128_to_255_octet_packets);
6931         ESTAT_ADD(rx_256_to_511_octet_packets);
6932         ESTAT_ADD(rx_512_to_1023_octet_packets);
6933         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6934         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6935         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6936         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6937         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6938
6939         ESTAT_ADD(tx_octets);
6940         ESTAT_ADD(tx_collisions);
6941         ESTAT_ADD(tx_xon_sent);
6942         ESTAT_ADD(tx_xoff_sent);
6943         ESTAT_ADD(tx_flow_control);
6944         ESTAT_ADD(tx_mac_errors);
6945         ESTAT_ADD(tx_single_collisions);
6946         ESTAT_ADD(tx_mult_collisions);
6947         ESTAT_ADD(tx_deferred);
6948         ESTAT_ADD(tx_excessive_collisions);
6949         ESTAT_ADD(tx_late_collisions);
6950         ESTAT_ADD(tx_collide_2times);
6951         ESTAT_ADD(tx_collide_3times);
6952         ESTAT_ADD(tx_collide_4times);
6953         ESTAT_ADD(tx_collide_5times);
6954         ESTAT_ADD(tx_collide_6times);
6955         ESTAT_ADD(tx_collide_7times);
6956         ESTAT_ADD(tx_collide_8times);
6957         ESTAT_ADD(tx_collide_9times);
6958         ESTAT_ADD(tx_collide_10times);
6959         ESTAT_ADD(tx_collide_11times);
6960         ESTAT_ADD(tx_collide_12times);
6961         ESTAT_ADD(tx_collide_13times);
6962         ESTAT_ADD(tx_collide_14times);
6963         ESTAT_ADD(tx_collide_15times);
6964         ESTAT_ADD(tx_ucast_packets);
6965         ESTAT_ADD(tx_mcast_packets);
6966         ESTAT_ADD(tx_bcast_packets);
6967         ESTAT_ADD(tx_carrier_sense_errors);
6968         ESTAT_ADD(tx_discards);
6969         ESTAT_ADD(tx_errors);
6970
6971         ESTAT_ADD(dma_writeq_full);
6972         ESTAT_ADD(dma_write_prioq_full);
6973         ESTAT_ADD(rxbds_empty);
6974         ESTAT_ADD(rx_discards);
6975         ESTAT_ADD(rx_errors);
6976         ESTAT_ADD(rx_threshold_hit);
6977
6978         ESTAT_ADD(dma_readq_full);
6979         ESTAT_ADD(dma_read_prioq_full);
6980         ESTAT_ADD(tx_comp_queue_full);
6981
6982         ESTAT_ADD(ring_set_send_prod_index);
6983         ESTAT_ADD(ring_status_update);
6984         ESTAT_ADD(nic_irqs);
6985         ESTAT_ADD(nic_avoided_irqs);
6986         ESTAT_ADD(nic_tx_threshold_hit);
6987
6988         return estats;
6989 }
6990
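/* Map the hardware MAC counters onto struct net_device_stats, again
 * adding the snapshot saved at the last close so the numbers are
 * cumulative.
 */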
6991 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6992 {
6993         struct tg3 *tp = netdev_priv(dev);
6994         struct net_device_stats *stats = &tp->net_stats;
6995         struct net_device_stats *old_stats = &tp->net_stats_prev;
6996         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6997
6998         if (!hw_stats)
6999                 return old_stats;
7000
7001         stats->rx_packets = old_stats->rx_packets +
7002                 get_stat64(&hw_stats->rx_ucast_packets) +
7003                 get_stat64(&hw_stats->rx_mcast_packets) +
7004                 get_stat64(&hw_stats->rx_bcast_packets);
7005                 
7006         stats->tx_packets = old_stats->tx_packets +
7007                 get_stat64(&hw_stats->tx_ucast_packets) +
7008                 get_stat64(&hw_stats->tx_mcast_packets) +
7009                 get_stat64(&hw_stats->tx_bcast_packets);
7010
7011         stats->rx_bytes = old_stats->rx_bytes +
7012                 get_stat64(&hw_stats->rx_octets);
7013         stats->tx_bytes = old_stats->tx_bytes +
7014                 get_stat64(&hw_stats->tx_octets);
7015
7016         stats->rx_errors = old_stats->rx_errors +
7017                 get_stat64(&hw_stats->rx_errors);
7018         stats->tx_errors = old_stats->tx_errors +
7019                 get_stat64(&hw_stats->tx_errors) +
7020                 get_stat64(&hw_stats->tx_mac_errors) +
7021                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7022                 get_stat64(&hw_stats->tx_discards);
7023
7024         stats->multicast = old_stats->multicast +
7025                 get_stat64(&hw_stats->rx_mcast_packets);
7026         stats->collisions = old_stats->collisions +
7027                 get_stat64(&hw_stats->tx_collisions);
7028
7029         stats->rx_length_errors = old_stats->rx_length_errors +
7030                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7031                 get_stat64(&hw_stats->rx_undersize_packets);
7032
7033         stats->rx_over_errors = old_stats->rx_over_errors +
7034                 get_stat64(&hw_stats->rxbds_empty);
7035         stats->rx_frame_errors = old_stats->rx_frame_errors +
7036                 get_stat64(&hw_stats->rx_align_errors);
7037         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7038                 get_stat64(&hw_stats->tx_discards);
7039         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7040                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7041
7042         stats->rx_crc_errors = old_stats->rx_crc_errors +
7043                 calc_crc_errors(tp);
7044
7045         stats->rx_missed_errors = old_stats->rx_missed_errors +
7046                 get_stat64(&hw_stats->rx_discards);
7047
7048         return stats;
7049 }
7050
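/* Bit-by-bit Ethernet CRC-32 (reflected polynomial 0xedb88320) over
 * buf[0..len-1], used to hash multicast addresses into the MAC hash
 * filter registers.
 */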
7051 static inline u32 calc_crc(unsigned char *buf, int len)
7052 {
7053         u32 reg;
7054         u32 tmp;
7055         int j, k;
7056
7057         reg = 0xffffffff;
7058
7059         for (j = 0; j < len; j++) {
7060                 reg ^= buf[j];
7061
7062                 for (k = 0; k < 8; k++) {
7063                         tmp = reg & 0x01;
7064
7065                         reg >>= 1;
7066
7067                         if (tmp) {
7068                                 reg ^= 0xedb88320;
7069                         }
7070                 }
7071         }
7072
7073         return ~reg;
7074 }
7075
7076 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7077 {
7078         /* accept or reject all multicast frames */
7079         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7080         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7081         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7082         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7083 }
7084
7085 static void __tg3_set_rx_mode(struct net_device *dev)
7086 {
7087         struct tg3 *tp = netdev_priv(dev);
7088         u32 rx_mode;
7089
7090         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7091                                   RX_MODE_KEEP_VLAN_TAG);
7092
7093         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7094          * flag clear.
7095          */
7096 #if TG3_VLAN_TAG_USED
7097         if (!tp->vlgrp &&
7098             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7099                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7100 #else
7101         /* By definition, VLAN is always disabled in this
7102          * case.
7103          */
7104         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7105                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7106 #endif
7107
7108         if (dev->flags & IFF_PROMISC) {
7109                 /* Promiscuous mode. */
7110                 rx_mode |= RX_MODE_PROMISC;
7111         } else if (dev->flags & IFF_ALLMULTI) {
7112                 /* Accept all multicast. */
7113                 tg3_set_multi(tp, 1);
7114         } else if (dev->mc_count < 1) {
7115                 /* Reject all multicast. */
7116                 tg3_set_multi(tp, 0);
7117         } else {
7118                 /* Accept one or more multicast(s). */
7119                 struct dev_mc_list *mclist;
7120                 unsigned int i;
7121                 u32 mc_filter[4] = { 0, };
7122                 u32 regidx;
7123                 u32 bit;
7124                 u32 crc;
7125
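                /* Hash each address into the 128-bit filter: invert the
                 * low 7 bits of its CRC, use bits 6:5 to pick one of the
                 * four hash registers and bits 4:0 to pick the bit.
                 */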
7126                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7127                      i++, mclist = mclist->next) {
7128
7129                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
7130                         bit = ~crc & 0x7f;
7131                         regidx = (bit & 0x60) >> 5;
7132                         bit &= 0x1f;
7133                         mc_filter[regidx] |= (1 << bit);
7134                 }
7135
7136                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7137                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7138                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7139                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7140         }
7141
7142         if (rx_mode != tp->rx_mode) {
7143                 tp->rx_mode = rx_mode;
7144                 tw32_f(MAC_RX_MODE, rx_mode);
7145                 udelay(10);
7146         }
7147 }
7148
7149 static void tg3_set_rx_mode(struct net_device *dev)
7150 {
7151         struct tg3 *tp = netdev_priv(dev);
7152
7153         tg3_full_lock(tp, 0);
7154         __tg3_set_rx_mode(dev);
7155         tg3_full_unlock(tp);
7156 }
7157
7158 #define TG3_REGDUMP_LEN         (32 * 1024)
7159
7160 static int tg3_get_regs_len(struct net_device *dev)
7161 {
7162         return TG3_REGDUMP_LEN;
7163 }
7164
7165 static void tg3_get_regs(struct net_device *dev,
7166                 struct ethtool_regs *regs, void *_p)
7167 {
7168         u32 *p = _p;
7169         struct tg3 *tp = netdev_priv(dev);
7170         u8 *orig_p = _p;
7171         int i;
7172
7173         regs->version = 0;
7174
7175         memset(p, 0, TG3_REGDUMP_LEN);
7176
7177         tg3_full_lock(tp, 0);
7178
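/* Each block of registers is copied into the dump buffer at its native
 * offset, so regions that are not read back stay zero-filled from the
 * memset above.
 */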
7179 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7180 #define GET_REG32_LOOP(base,len)                \
7181 do {    p = (u32 *)(orig_p + (base));           \
7182         for (i = 0; i < len; i += 4)            \
7183                 __GET_REG32((base) + i);        \
7184 } while (0)
7185 #define GET_REG32_1(reg)                        \
7186 do {    p = (u32 *)(orig_p + (reg));            \
7187         __GET_REG32((reg));                     \
7188 } while (0)
7189
7190         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7191         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7192         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7193         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7194         GET_REG32_1(SNDDATAC_MODE);
7195         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7196         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7197         GET_REG32_1(SNDBDC_MODE);
7198         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7199         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7200         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7201         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7202         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7203         GET_REG32_1(RCVDCC_MODE);
7204         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7205         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7206         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7207         GET_REG32_1(MBFREE_MODE);
7208         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7209         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7210         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7211         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7212         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7213         GET_REG32_1(RX_CPU_MODE);
7214         GET_REG32_1(RX_CPU_STATE);
7215         GET_REG32_1(RX_CPU_PGMCTR);
7216         GET_REG32_1(RX_CPU_HWBKPT);
7217         GET_REG32_1(TX_CPU_MODE);
7218         GET_REG32_1(TX_CPU_STATE);
7219         GET_REG32_1(TX_CPU_PGMCTR);
7220         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7221         GET_REG32_LOOP(FTQ_RESET, 0x120);
7222         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7223         GET_REG32_1(DMAC_MODE);
7224         GET_REG32_LOOP(GRC_MODE, 0x4c);
7225         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7226                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7227
7228 #undef __GET_REG32
7229 #undef GET_REG32_LOOP
7230 #undef GET_REG32_1
7231
7232         tg3_full_unlock(tp);
7233 }
7234
7235 static int tg3_get_eeprom_len(struct net_device *dev)
7236 {
7237         struct tg3 *tp = netdev_priv(dev);
7238
7239         return tp->nvram_size;
7240 }
7241
7242 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7243
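/* NVRAM is read one 32-bit word at a time, so an unaligned request is
 * split into a leading partial word, whole middle words, and a trailing
 * partial word.
 */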
7244 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7245 {
7246         struct tg3 *tp = netdev_priv(dev);
7247         int ret;
7248         u8  *pd;
7249         u32 i, offset, len, val, b_offset, b_count;
7250
7251         offset = eeprom->offset;
7252         len = eeprom->len;
7253         eeprom->len = 0;
7254
7255         eeprom->magic = TG3_EEPROM_MAGIC;
7256
7257         if (offset & 3) {
7258                 /* adjustments to start on required 4 byte boundary */
7259                 b_offset = offset & 3;
7260                 b_count = 4 - b_offset;
7261                 if (b_count > len) {
7262                         /* i.e. offset=1 len=2 */
7263                         b_count = len;
7264                 }
7265                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7266                 if (ret)
7267                         return ret;
7268                 val = cpu_to_le32(val);
7269                 memcpy(data, ((char*)&val) + b_offset, b_count);
7270                 len -= b_count;
7271                 offset += b_count;
7272                 eeprom->len += b_count;
7273         }
7274
7275         /* read bytes up to the last 4 byte boundary */
7276         pd = &data[eeprom->len];
7277         for (i = 0; i < (len - (len & 3)); i += 4) {
7278                 ret = tg3_nvram_read(tp, offset + i, &val);
7279                 if (ret) {
7280                         eeprom->len += i;
7281                         return ret;
7282                 }
7283                 val = cpu_to_le32(val);
7284                 memcpy(pd + i, &val, 4);
7285         }
7286         eeprom->len += i;
7287
7288         if (len & 3) {
7289                 /* read last bytes not ending on 4 byte boundary */
7290                 pd = &data[eeprom->len];
7291                 b_count = len & 3;
7292                 b_offset = offset + len - b_count;
7293                 ret = tg3_nvram_read(tp, b_offset, &val);
7294                 if (ret)
7295                         return ret;
7296                 val = cpu_to_le32(val);
7297                 memcpy(pd, ((char*)&val), b_count);
7298                 eeprom->len += b_count;
7299         }
7300         return 0;
7301 }
7302
7303 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7304
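/* Writes are also done in whole 32-bit words: an unaligned request is
 * widened to word boundaries by reading back the neighbouring words and
 * merging the user data through a temporary buffer.
 */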
7305 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7306 {
7307         struct tg3 *tp = netdev_priv(dev);
7308         int ret;
7309         u32 offset, len, b_offset, odd_len, start, end;
7310         u8 *buf;
7311
7312         if (eeprom->magic != TG3_EEPROM_MAGIC)
7313                 return -EINVAL;
7314
7315         offset = eeprom->offset;
7316         len = eeprom->len;
7317
7318         if ((b_offset = (offset & 3))) {
7319                 /* adjustments to start on required 4 byte boundary */
7320                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7321                 if (ret)
7322                         return ret;
7323                 start = cpu_to_le32(start);
7324                 len += b_offset;
7325                 offset &= ~3;
7326                 if (len < 4)
7327                         len = 4;
7328         }
7329
7330         odd_len = 0;
7331         if (len & 3) {
7332                 /* adjustments to end on required 4 byte boundary */
7333                 odd_len = 1;
7334                 len = (len + 3) & ~3;
7335                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7336                 if (ret)
7337                         return ret;
7338                 end = cpu_to_le32(end);
7339         }
7340
7341         buf = data;
7342         if (b_offset || odd_len) {
7343                 buf = kmalloc(len, GFP_KERNEL);
7344                 if (buf == NULL)
7345                         return -ENOMEM;
7346                 if (b_offset)
7347                         memcpy(buf, &start, 4);
7348                 if (odd_len)
7349                         memcpy(buf+len-4, &end, 4);
7350                 memcpy(buf + b_offset, data, eeprom->len);
7351         }
7352
7353         ret = tg3_nvram_write_block(tp, offset, len, buf);
7354
7355         if (buf != data)
7356                 kfree(buf);
7357
7358         return ret;
7359 }
7360
7361 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7362 {
7363         struct tg3 *tp = netdev_priv(dev);
7364   
7365         cmd->supported = (SUPPORTED_Autoneg);
7366
7367         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7368                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7369                                    SUPPORTED_1000baseT_Full);
7370
7371         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
7372                 cmd->supported |= (SUPPORTED_100baseT_Half |
7373                                   SUPPORTED_100baseT_Full |
7374                                   SUPPORTED_10baseT_Half |
7375                                   SUPPORTED_10baseT_Full |
7376                                   SUPPORTED_MII);
7377         else
7378                 cmd->supported |= SUPPORTED_FIBRE;
7379   
7380         cmd->advertising = tp->link_config.advertising;
7381         if (netif_running(dev)) {
7382                 cmd->speed = tp->link_config.active_speed;
7383                 cmd->duplex = tp->link_config.active_duplex;
7384         }
7385         cmd->port = 0;
7386         cmd->phy_address = PHY_ADDR;
7387         cmd->transceiver = 0;
7388         cmd->autoneg = tp->link_config.autoneg;
7389         cmd->maxtxpkt = 0;
7390         cmd->maxrxpkt = 0;
7391         return 0;
7392 }
7393   
7394 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7395 {
7396         struct tg3 *tp = netdev_priv(dev);
7397   
7398         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7399                 /* These are the only valid advertisement bits allowed.  */
7400                 if (cmd->autoneg == AUTONEG_ENABLE &&
7401                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7402                                           ADVERTISED_1000baseT_Full |
7403                                           ADVERTISED_Autoneg |
7404                                           ADVERTISED_FIBRE)))
7405                         return -EINVAL;
7406                 /* Fiber can only do SPEED_1000.  */
7407                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7408                          (cmd->speed != SPEED_1000))
7409                         return -EINVAL;
7410         /* Copper cannot force SPEED_1000.  */
7411         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7412                    (cmd->speed == SPEED_1000))
7413                 return -EINVAL;
7414         else if ((cmd->speed == SPEED_1000) &&
7415                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7416                 return -EINVAL;
7417
7418         tg3_full_lock(tp, 0);
7419
7420         tp->link_config.autoneg = cmd->autoneg;
7421         if (cmd->autoneg == AUTONEG_ENABLE) {
7422                 tp->link_config.advertising = cmd->advertising;
7423                 tp->link_config.speed = SPEED_INVALID;
7424                 tp->link_config.duplex = DUPLEX_INVALID;
7425         } else {
7426                 tp->link_config.advertising = 0;
7427                 tp->link_config.speed = cmd->speed;
7428                 tp->link_config.duplex = cmd->duplex;
7429         }
7430   
7431         if (netif_running(dev))
7432                 tg3_setup_phy(tp, 1);
7433
7434         tg3_full_unlock(tp);
7435   
7436         return 0;
7437 }
7438   
7439 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7440 {
7441         struct tg3 *tp = netdev_priv(dev);
7442   
7443         strcpy(info->driver, DRV_MODULE_NAME);
7444         strcpy(info->version, DRV_MODULE_VERSION);
7445         strcpy(info->bus_info, pci_name(tp->pdev));
7446 }
7447   
7448 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7449 {
7450         struct tg3 *tp = netdev_priv(dev);
7451   
7452         wol->supported = WAKE_MAGIC;
7453         wol->wolopts = 0;
7454         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7455                 wol->wolopts = WAKE_MAGIC;
7456         memset(&wol->sopass, 0, sizeof(wol->sopass));
7457 }
7458   
7459 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7460 {
7461         struct tg3 *tp = netdev_priv(dev);
7462   
7463         if (wol->wolopts & ~WAKE_MAGIC)
7464                 return -EINVAL;
7465         if ((wol->wolopts & WAKE_MAGIC) &&
7466             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7467             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7468                 return -EINVAL;
7469   
7470         spin_lock_bh(&tp->lock);
7471         if (wol->wolopts & WAKE_MAGIC)
7472                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7473         else
7474                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7475         spin_unlock_bh(&tp->lock);
7476   
7477         return 0;
7478 }
7479   
7480 static u32 tg3_get_msglevel(struct net_device *dev)
7481 {
7482         struct tg3 *tp = netdev_priv(dev);
7483         return tp->msg_enable;
7484 }
7485   
7486 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7487 {
7488         struct tg3 *tp = netdev_priv(dev);
7489         tp->msg_enable = value;
7490 }
7491   
7492 #if TG3_TSO_SUPPORT != 0
7493 static int tg3_set_tso(struct net_device *dev, u32 value)
7494 {
7495         struct tg3 *tp = netdev_priv(dev);
7496
7497         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7498                 if (value)
7499                         return -EINVAL;
7500                 return 0;
7501         }
7502         return ethtool_op_set_tso(dev, value);
7503 }
7504 #endif
7505   
7506 static int tg3_nway_reset(struct net_device *dev)
7507 {
7508         struct tg3 *tp = netdev_priv(dev);
7509         u32 bmcr;
7510         int r;
7511   
7512         if (!netif_running(dev))
7513                 return -EAGAIN;
7514
7515         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7516                 return -EINVAL;
7517
7518         spin_lock_bh(&tp->lock);
7519         r = -EINVAL;
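        /* BMCR is read twice on purpose here, presumably so the first
         * read flushes any latched value and the second one reflects the
         * current state; the exact intent is not documented.
         */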
7520         tg3_readphy(tp, MII_BMCR, &bmcr);
7521         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7522             ((bmcr & BMCR_ANENABLE) ||
7523              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7524                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7525                                            BMCR_ANENABLE);
7526                 r = 0;
7527         }
7528         spin_unlock_bh(&tp->lock);
7529   
7530         return r;
7531 }
7532   
7533 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7534 {
7535         struct tg3 *tp = netdev_priv(dev);
7536   
7537         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7538         ering->rx_mini_max_pending = 0;
7539         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7540
7541         ering->rx_pending = tp->rx_pending;
7542         ering->rx_mini_pending = 0;
7543         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7544         ering->tx_pending = tp->tx_pending;
7545 }
7546   
7547 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7548 {
7549         struct tg3 *tp = netdev_priv(dev);
7550         int irq_sync = 0;
7551   
7552         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7553             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7554             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7555                 return -EINVAL;
7556   
7557         if (netif_running(dev)) {
7558                 tg3_netif_stop(tp);
7559                 irq_sync = 1;
7560         }
7561
7562         tg3_full_lock(tp, irq_sync);
7563   
7564         tp->rx_pending = ering->rx_pending;
7565
7566         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7567             tp->rx_pending > 63)
7568                 tp->rx_pending = 63;
7569         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7570         tp->tx_pending = ering->tx_pending;
7571
7572         if (netif_running(dev)) {
7573                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7574                 tg3_init_hw(tp);
7575                 tg3_netif_start(tp);
7576         }
7577
7578         tg3_full_unlock(tp);
7579   
7580         return 0;
7581 }
7582   
7583 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7584 {
7585         struct tg3 *tp = netdev_priv(dev);
7586   
7587         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7588         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7589         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7590 }
7591   
7592 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7593 {
7594         struct tg3 *tp = netdev_priv(dev);
7595         int irq_sync = 0;
7596   
7597         if (netif_running(dev)) {
7598                 tg3_netif_stop(tp);
7599                 irq_sync = 1;
7600         }
7601
7602         tg3_full_lock(tp, irq_sync);
7603
7604         if (epause->autoneg)
7605                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7606         else
7607                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7608         if (epause->rx_pause)
7609                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7610         else
7611                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7612         if (epause->tx_pause)
7613                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7614         else
7615                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7616
7617         if (netif_running(dev)) {
7618                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7619                 tg3_init_hw(tp);
7620                 tg3_netif_start(tp);
7621         }
7622
7623         tg3_full_unlock(tp);
7624   
7625         return 0;
7626 }
7627   
7628 static u32 tg3_get_rx_csum(struct net_device *dev)
7629 {
7630         struct tg3 *tp = netdev_priv(dev);
7631         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7632 }
7633   
7634 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7635 {
7636         struct tg3 *tp = netdev_priv(dev);
7637   
7638         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7639                 if (data != 0)
7640                         return -EINVAL;
7641                 return 0;
7642         }
7643   
7644         spin_lock_bh(&tp->lock);
7645         if (data)
7646                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7647         else
7648                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7649         spin_unlock_bh(&tp->lock);
7650   
7651         return 0;
7652 }
7653   
7654 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7655 {
7656         struct tg3 *tp = netdev_priv(dev);
7657   
7658         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7659                 if (data != 0)
7660                         return -EINVAL;
7661                 return 0;
7662         }
7663   
7664         if (data)
7665                 dev->features |= NETIF_F_IP_CSUM;
7666         else
7667                 dev->features &= ~NETIF_F_IP_CSUM;
7668
7669         return 0;
7670 }
7671
7672 static int tg3_get_stats_count (struct net_device *dev)
7673 {
7674         return TG3_NUM_STATS;
7675 }
7676
7677 static int tg3_get_test_count (struct net_device *dev)
7678 {
7679         return TG3_NUM_TEST;
7680 }
7681
7682 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7683 {
7684         switch (stringset) {
7685         case ETH_SS_STATS:
7686                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7687                 break;
7688         case ETH_SS_TEST:
7689                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7690                 break;
7691         default:
7692                 WARN_ON(1);     /* we need a WARN() */
7693                 break;
7694         }
7695 }
7696
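/* Blink the LEDs to identify the adapter: "data" is the duration in
 * seconds (default 2), toggled in 500 ms steps between all-on and
 * all-off before the original LED control value is restored.
 */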
7697 static int tg3_phys_id(struct net_device *dev, u32 data)
7698 {
7699         struct tg3 *tp = netdev_priv(dev);
7700         int i;
7701
7702         if (!netif_running(tp->dev))
7703                 return -EAGAIN;
7704
7705         if (data == 0)
7706                 data = 2;
7707
7708         for (i = 0; i < (data * 2); i++) {
7709                 if ((i % 2) == 0)
7710                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7711                                            LED_CTRL_1000MBPS_ON |
7712                                            LED_CTRL_100MBPS_ON |
7713                                            LED_CTRL_10MBPS_ON |
7714                                            LED_CTRL_TRAFFIC_OVERRIDE |
7715                                            LED_CTRL_TRAFFIC_BLINK |
7716                                            LED_CTRL_TRAFFIC_LED);
7717         
7718                 else
7719                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7720                                            LED_CTRL_TRAFFIC_OVERRIDE);
7721
7722                 if (msleep_interruptible(500))
7723                         break;
7724         }
7725         tw32(MAC_LED_CTRL, tp->led_ctrl);
7726         return 0;
7727 }
7728
7729 static void tg3_get_ethtool_stats (struct net_device *dev,
7730                                    struct ethtool_stats *estats, u64 *tmp_stats)
7731 {
7732         struct tg3 *tp = netdev_priv(dev);
7733         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7734 }
7735
7736 #define NVRAM_TEST_SIZE 0x100
7737
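     /* Offline NVRAM self test: read the first NVRAM_TEST_SIZE bytes, check
      * the EEPROM magic in word 0, then verify the bootstrap CRC stored at
      * offset 0x10 and the manufacturing-block CRC stored at offset 0xfc
      * against calc_crc().  Returns 0 on success or a negative errno.
      */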
7738 static int tg3_test_nvram(struct tg3 *tp)
7739 {
7740         u32 *buf, csum;
7741         int i, j, err = 0;
7742
7743         buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7744         if (buf == NULL)
7745                 return -ENOMEM;
7746
7747         for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7748                 u32 val;
7749
7750                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7751                         break;
7752                 buf[j] = cpu_to_le32(val);
7753         }
7754         if (i < NVRAM_TEST_SIZE)
7755                 goto out;
7756
7757         err = -EIO;
7758         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7759                 goto out;
7760
7761         /* Bootstrap checksum at offset 0x10 */
7762         csum = calc_crc((unsigned char *) buf, 0x10);
7763         if (csum != cpu_to_le32(buf[0x10/4]))
7764                 goto out;
7765
7766         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7767         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7768         if (csum != cpu_to_le32(buf[0xfc/4]))
7769                 goto out;
7770
7771         err = 0;
7772
7773 out:
7774         kfree(buf);
7775         return err;
7776 }
7777
7778 #define TG3_SERDES_TIMEOUT_SEC  2
7779 #define TG3_COPPER_TIMEOUT_SEC  6
7780
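     /* Link self test: poll netif_carrier_ok() once a second for up to
      * TG3_SERDES_TIMEOUT_SEC or TG3_COPPER_TIMEOUT_SEC depending on the
      * PHY type.  Returns 0 once the carrier is up, -ENODEV if the
      * interface is not running, and -EIO on timeout or signal.
      */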
7781 static int tg3_test_link(struct tg3 *tp)
7782 {
7783         int i, max;
7784
7785         if (!netif_running(tp->dev))
7786                 return -ENODEV;
7787
7788         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
7789                 max = TG3_SERDES_TIMEOUT_SEC;
7790         else
7791                 max = TG3_COPPER_TIMEOUT_SEC;
7792
7793         for (i = 0; i < max; i++) {
7794                 if (netif_carrier_ok(tp->dev))
7795                         return 0;
7796
7797                 if (msleep_interruptible(1000))
7798                         break;
7799         }
7800
7801         return -EIO;
7802 }
7803
7804 /* Only test the commonly used registers */
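     /* Each reg_tbl entry below pairs a register offset with a read-only
      * mask and a read/write mask; the TG3_FL_5705 / TG3_FL_NOT_5705 /
      * TG3_FL_NOT_5788 flags skip entries on chips they do not apply to.
      * Every selected register is written with all-zeros and then all-ones,
      * checking that read-only bits keep their saved value and read/write
      * bits track what was written, before the original value is restored.
      */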
7805 static int tg3_test_registers(struct tg3 *tp)
7806 {
7807         int i, is_5705;
7808         u32 offset, read_mask, write_mask, val, save_val, read_val;
7809         static struct {
7810                 u16 offset;
7811                 u16 flags;
7812 #define TG3_FL_5705     0x1
7813 #define TG3_FL_NOT_5705 0x2
7814 #define TG3_FL_NOT_5788 0x4
7815                 u32 read_mask;
7816                 u32 write_mask;
7817         } reg_tbl[] = {
7818                 /* MAC Control Registers */
7819                 { MAC_MODE, TG3_FL_NOT_5705,
7820                         0x00000000, 0x00ef6f8c },
7821                 { MAC_MODE, TG3_FL_5705,
7822                         0x00000000, 0x01ef6b8c },
7823                 { MAC_STATUS, TG3_FL_NOT_5705,
7824                         0x03800107, 0x00000000 },
7825                 { MAC_STATUS, TG3_FL_5705,
7826                         0x03800100, 0x00000000 },
7827                 { MAC_ADDR_0_HIGH, 0x0000,
7828                         0x00000000, 0x0000ffff },
7829                 { MAC_ADDR_0_LOW, 0x0000,
7830                         0x00000000, 0xffffffff },
7831                 { MAC_RX_MTU_SIZE, 0x0000,
7832                         0x00000000, 0x0000ffff },
7833                 { MAC_TX_MODE, 0x0000,
7834                         0x00000000, 0x00000070 },
7835                 { MAC_TX_LENGTHS, 0x0000,
7836                         0x00000000, 0x00003fff },
7837                 { MAC_RX_MODE, TG3_FL_NOT_5705,
7838                         0x00000000, 0x000007fc },
7839                 { MAC_RX_MODE, TG3_FL_5705,
7840                         0x00000000, 0x000007dc },
7841                 { MAC_HASH_REG_0, 0x0000,
7842                         0x00000000, 0xffffffff },
7843                 { MAC_HASH_REG_1, 0x0000,
7844                         0x00000000, 0xffffffff },
7845                 { MAC_HASH_REG_2, 0x0000,
7846                         0x00000000, 0xffffffff },
7847                 { MAC_HASH_REG_3, 0x0000,
7848                         0x00000000, 0xffffffff },
7849
7850                 /* Receive Data and Receive BD Initiator Control Registers. */
7851                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7852                         0x00000000, 0xffffffff },
7853                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7854                         0x00000000, 0xffffffff },
7855                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7856                         0x00000000, 0x00000003 },
7857                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7858                         0x00000000, 0xffffffff },
7859                 { RCVDBDI_STD_BD+0, 0x0000,
7860                         0x00000000, 0xffffffff },
7861                 { RCVDBDI_STD_BD+4, 0x0000,
7862                         0x00000000, 0xffffffff },
7863                 { RCVDBDI_STD_BD+8, 0x0000,
7864                         0x00000000, 0xffff0002 },
7865                 { RCVDBDI_STD_BD+0xc, 0x0000,
7866                         0x00000000, 0xffffffff },
7867         
7868                 /* Receive BD Initiator Control Registers. */
7869                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7870                         0x00000000, 0xffffffff },
7871                 { RCVBDI_STD_THRESH, TG3_FL_5705,
7872                         0x00000000, 0x000003ff },
7873                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7874                         0x00000000, 0xffffffff },
7875         
7876                 /* Host Coalescing Control Registers. */
7877                 { HOSTCC_MODE, TG3_FL_NOT_5705,
7878                         0x00000000, 0x00000004 },
7879                 { HOSTCC_MODE, TG3_FL_5705,
7880                         0x00000000, 0x000000f6 },
7881                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7882                         0x00000000, 0xffffffff },
7883                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7884                         0x00000000, 0x000003ff },
7885                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7886                         0x00000000, 0xffffffff },
7887                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7888                         0x00000000, 0x000003ff },
7889                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7890                         0x00000000, 0xffffffff },
7891                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7892                         0x00000000, 0x000000ff },
7893                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7894                         0x00000000, 0xffffffff },
7895                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7896                         0x00000000, 0x000000ff },
7897                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7898                         0x00000000, 0xffffffff },
7899                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7900                         0x00000000, 0xffffffff },
7901                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7902                         0x00000000, 0xffffffff },
7903                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7904                         0x00000000, 0x000000ff },
7905                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7906                         0x00000000, 0xffffffff },
7907                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7908                         0x00000000, 0x000000ff },
7909                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7910                         0x00000000, 0xffffffff },
7911                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7912                         0x00000000, 0xffffffff },
7913                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7914                         0x00000000, 0xffffffff },
7915                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7916                         0x00000000, 0xffffffff },
7917                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7918                         0x00000000, 0xffffffff },
7919                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7920                         0xffffffff, 0x00000000 },
7921                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7922                         0xffffffff, 0x00000000 },
7923
7924                 /* Buffer Manager Control Registers. */
7925                 { BUFMGR_MB_POOL_ADDR, 0x0000,
7926                         0x00000000, 0x007fff80 },
7927                 { BUFMGR_MB_POOL_SIZE, 0x0000,
7928                         0x00000000, 0x007fffff },
7929                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7930                         0x00000000, 0x0000003f },
7931                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7932                         0x00000000, 0x000001ff },
7933                 { BUFMGR_MB_HIGH_WATER, 0x0000,
7934                         0x00000000, 0x000001ff },
7935                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7936                         0xffffffff, 0x00000000 },
7937                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7938                         0xffffffff, 0x00000000 },
7939         
7940                 /* Mailbox Registers */
7941                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7942                         0x00000000, 0x000001ff },
7943                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7944                         0x00000000, 0x000001ff },
7945                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7946                         0x00000000, 0x000007ff },
7947                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7948                         0x00000000, 0x000001ff },
7949
7950                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7951         };
7952
7953         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7954                 is_5705 = 1;
7955         else
7956                 is_5705 = 0;
7957
7958         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
7959                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7960                         continue;
7961
7962                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7963                         continue;
7964
7965                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7966                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
7967                         continue;
7968
7969                 offset = (u32) reg_tbl[i].offset;
7970                 read_mask = reg_tbl[i].read_mask;
7971                 write_mask = reg_tbl[i].write_mask;
7972
7973                 /* Save the original register content */
7974                 save_val = tr32(offset);
7975
7976                 /* Determine the read-only value. */
7977                 read_val = save_val & read_mask;
7978
7979                 /* Write zero to the register, then make sure the read-only bits
7980                  * are not changed and the read/write bits are all zeros.
7981                  */
7982                 tw32(offset, 0);
7983
7984                 val = tr32(offset);
7985
7986                 /* Test the read-only and read/write bits. */
7987                 if (((val & read_mask) != read_val) || (val & write_mask))
7988                         goto out;
7989
7990                 /* Write ones to all the bits defined by RdMask and WrMask, then
7991                  * make sure the read-only bits are not changed and the
7992                  * read/write bits are all ones.
7993                  */
7994                 tw32(offset, read_mask | write_mask);
7995
7996                 val = tr32(offset);
7997
7998                 /* Test the read-only bits. */
7999                 if ((val & read_mask) != read_val)
8000                         goto out;
8001
8002                 /* Test the read/write bits. */
8003                 if ((val & write_mask) != write_mask)
8004                         goto out;
8005
8006                 tw32(offset, save_val);
8007         }
8008
8009         return 0;
8010
8011 out:
8012         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8013         tw32(offset, save_val);
8014         return -EIO;
8015 }
8016
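     /* Exercise one window of internal NIC memory: write each test pattern
      * to every word with tg3_write_mem(), read it back with tg3_read_mem(),
      * and fail with -EIO on the first mismatch.
      */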
8017 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8018 {
8019         static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8020         int i;
8021         u32 j;
8022
8023         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
8024                 for (j = 0; j < len; j += 4) {
8025                         u32 val;
8026
8027                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8028                         tg3_read_mem(tp, offset + j, &val);
8029                         if (val != test_pattern[i])
8030                                 return -EIO;
8031                 }
8032         }
8033         return 0;
8034 }
8035
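     /* Memory self test: run tg3_do_mem_test() over a per-chip table of
      * internal memory regions (mem_tbl_5705 for TG3_FLG2_5705_PLUS parts,
      * the larger mem_tbl_570x otherwise), stopping at the first failure.
      */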
8036 static int tg3_test_memory(struct tg3 *tp)
8037 {
8038         static struct mem_entry {
8039                 u32 offset;
8040                 u32 len;
8041         } mem_tbl_570x[] = {
8042                 { 0x00000000, 0x00b50},
8043                 { 0x00002000, 0x1c000},
8044                 { 0xffffffff, 0x00000}
8045         }, mem_tbl_5705[] = {
8046                 { 0x00000100, 0x0000c},
8047                 { 0x00000200, 0x00008},
8048                 { 0x00004000, 0x00800},
8049                 { 0x00006000, 0x01000},
8050                 { 0x00008000, 0x02000},
8051                 { 0x00010000, 0x0e000},
8052                 { 0xffffffff, 0x00000}
8053         };
8054         struct mem_entry *mem_tbl;
8055         int err = 0;
8056         int i;
8057
8058         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8059                 mem_tbl = mem_tbl_5705;
8060         else
8061                 mem_tbl = mem_tbl_570x;
8062
8063         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8064                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8065                     mem_tbl[i].len)) != 0)
8066                         break;
8067         }
8068         
8069         return err;
8070 }
8071
8072 #define TG3_MAC_LOOPBACK        0
8073 #define TG3_PHY_LOOPBACK        1
8074
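     /* Send one self-addressed 1514-byte frame with either internal MAC
      * loopback or PHY (BMCR) loopback enabled, then poll the status block
      * until the TX consumer and RX producer indices advance.  The frame
      * returned on the standard RX ring is compared byte-for-byte with the
      * transmitted payload; any mismatch, RX error or stuck ring index
      * yields -EIO.  MAC loopback is skipped (reported as passing) on 5780
      * because of the erratum noted below.
      */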
8075 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8076 {
8077         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8078         u32 desc_idx;
8079         struct sk_buff *skb, *rx_skb;
8080         u8 *tx_data;
8081         dma_addr_t map;
8082         int num_pkts, tx_len, rx_len, i, err;
8083         struct tg3_rx_buffer_desc *desc;
8084
8085         if (loopback_mode == TG3_MAC_LOOPBACK) {
8086                 /* HW erratum - MAC loopback fails in some cases on 5780.
8087                  * Normal traffic and PHY loopback are not affected by
8088                  * this erratum.
8089                  */
8090                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8091                         return 0;
8092
8093                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8094                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8095                            MAC_MODE_PORT_MODE_GMII;
8096                 tw32(MAC_MODE, mac_mode);
8097         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8098                 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8099                                            BMCR_SPEED1000);
8100                 udelay(40);
8101                 /* reset to prevent losing 1st rx packet intermittently */
8102                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8103                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8104                         udelay(10);
8105                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8106                 }
8107                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8108                            MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
8109                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
8110                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8111                 tw32(MAC_MODE, mac_mode);
8112         }
8113         else
8114                 return -EINVAL;
8115
8116         err = -EIO;
8117
8118         tx_len = 1514;
8119         skb = dev_alloc_skb(tx_len);
             if (!skb)
                     return -ENOMEM;
8120         tx_data = skb_put(skb, tx_len);
8121         memcpy(tx_data, tp->dev->dev_addr, 6);
8122         memset(tx_data + 6, 0x0, 8);
8123
8124         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8125
8126         for (i = 14; i < tx_len; i++)
8127                 tx_data[i] = (u8) (i & 0xff);
8128
8129         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8130
8131         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8132              HOSTCC_MODE_NOW);
8133
8134         udelay(10);
8135
8136         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8137
8138         num_pkts = 0;
8139
8140         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8141
8142         tp->tx_prod++;
8143         num_pkts++;
8144
8145         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8146                      tp->tx_prod);
8147         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8148
8149         udelay(10);
8150
8151         for (i = 0; i < 10; i++) {
8152                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8153                        HOSTCC_MODE_NOW);
8154
8155                 udelay(10);
8156
8157                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8158                 rx_idx = tp->hw_status->idx[0].rx_producer;
8159                 if ((tx_idx == tp->tx_prod) &&
8160                     (rx_idx == (rx_start_idx + num_pkts)))
8161                         break;
8162         }
8163
8164         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8165         dev_kfree_skb(skb);
8166
8167         if (tx_idx != tp->tx_prod)
8168                 goto out;
8169
8170         if (rx_idx != rx_start_idx + num_pkts)
8171                 goto out;
8172
8173         desc = &tp->rx_rcb[rx_start_idx];
8174         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8175         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8176         if (opaque_key != RXD_OPAQUE_RING_STD)
8177                 goto out;
8178
8179         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8180             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8181                 goto out;
8182
8183         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8184         if (rx_len != tx_len)
8185                 goto out;
8186
8187         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8188
8189         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8190         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8191
8192         for (i = 14; i < tx_len; i++) {
8193                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8194                         goto out;
8195         }
8196         err = 0;
8197         
8198         /* tg3_free_rings will unmap and free the rx_skb */
8199 out:
8200         return err;
8201 }
8202
8203 #define TG3_MAC_LOOPBACK_FAILED         1
8204 #define TG3_PHY_LOOPBACK_FAILED         2
8205 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8206                                          TG3_PHY_LOOPBACK_FAILED)
8207
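     /* Reset the hardware and run the MAC loopback test, plus the PHY
      * loopback test on devices without a SERDES PHY.  The return value is
      * a bitmask of TG3_MAC_LOOPBACK_FAILED / TG3_PHY_LOOPBACK_FAILED
      * rather than an errno; both bits are reported if the interface is
      * not running.
      */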
8208 static int tg3_test_loopback(struct tg3 *tp)
8209 {
8210         int err = 0;
8211
8212         if (!netif_running(tp->dev))
8213                 return TG3_LOOPBACK_FAILED;
8214
8215         tg3_reset_hw(tp);
8216
8217         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8218                 err |= TG3_MAC_LOOPBACK_FAILED;
8219         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8220                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8221                         err |= TG3_PHY_LOOPBACK_FAILED;
8222         }
8223
8224         return err;
8225 }
8226
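     /* ethtool self-test entry point.  The NVRAM and link tests always run;
      * when ETH_TEST_FL_OFFLINE is requested the chip is halted so the
      * register, memory, loopback and interrupt tests can run as well, and
      * the hardware is reinitialized afterwards.  data[0]..data[5] carry
      * the individual results and ETH_TEST_FL_FAILED is set on any failure.
      */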
8227 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8228                           u64 *data)
8229 {
8230         struct tg3 *tp = netdev_priv(dev);
8231
8232         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8233
8234         if (tg3_test_nvram(tp) != 0) {
8235                 etest->flags |= ETH_TEST_FL_FAILED;
8236                 data[0] = 1;
8237         }
8238         if (tg3_test_link(tp) != 0) {
8239                 etest->flags |= ETH_TEST_FL_FAILED;
8240                 data[1] = 1;
8241         }
8242         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8243                 int err, irq_sync = 0;
8244
8245                 if (netif_running(dev)) {
8246                         tg3_netif_stop(tp);
8247                         irq_sync = 1;
8248                 }
8249
8250                 tg3_full_lock(tp, irq_sync);
8251
8252                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8253                 err = tg3_nvram_lock(tp);
8254                 tg3_halt_cpu(tp, RX_CPU_BASE);
8255                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8256                         tg3_halt_cpu(tp, TX_CPU_BASE);
8257                 if (!err)
8258                         tg3_nvram_unlock(tp);
8259
8260                 if (tg3_test_registers(tp) != 0) {
8261                         etest->flags |= ETH_TEST_FL_FAILED;
8262                         data[2] = 1;
8263                 }
8264                 if (tg3_test_memory(tp) != 0) {
8265                         etest->flags |= ETH_TEST_FL_FAILED;
8266                         data[3] = 1;
8267                 }
8268                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8269                         etest->flags |= ETH_TEST_FL_FAILED;
8270
8271                 tg3_full_unlock(tp);
8272
8273                 if (tg3_test_interrupt(tp) != 0) {
8274                         etest->flags |= ETH_TEST_FL_FAILED;
8275                         data[5] = 1;
8276                 }
8277
8278                 tg3_full_lock(tp, 0);
8279
8280                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8281                 if (netif_running(dev)) {
8282                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8283                         tg3_init_hw(tp);
8284                         tg3_netif_start(tp);
8285                 }
8286
8287                 tg3_full_unlock(tp);
8288         }
8289 }
8290
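     /* MII ioctls: SIOCGMIIPHY returns PHY_ADDR and falls through to
      * SIOCGMIIREG, which reads a PHY register under tp->lock; SIOCSMIIREG
      * writes one and requires CAP_NET_ADMIN.  SERDES devices have no
      * MDIO-accessible PHY, so those requests end up returning -EOPNOTSUPP.
      */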
8291 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8292 {
8293         struct mii_ioctl_data *data = if_mii(ifr);
8294         struct tg3 *tp = netdev_priv(dev);
8295         int err;
8296
8297         switch (cmd) {
8298         case SIOCGMIIPHY:
8299                 data->phy_id = PHY_ADDR;
8300
8301                 /* fallthru */
8302         case SIOCGMIIREG: {
8303                 u32 mii_regval;
8304
8305                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8306                         break;                  /* We have no PHY */
8307
8308                 spin_lock_bh(&tp->lock);
8309                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8310                 spin_unlock_bh(&tp->lock);
8311
8312                 data->val_out = mii_regval;
8313
8314                 return err;
8315         }
8316
8317         case SIOCSMIIREG:
8318                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8319                         break;                  /* We have no PHY */
8320
8321                 if (!capable(CAP_NET_ADMIN))
8322                         return -EPERM;
8323
8324                 spin_lock_bh(&tp->lock);
8325                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8326                 spin_unlock_bh(&tp->lock);
8327
8328                 return err;
8329
8330         default:
8331                 /* do nothing */
8332                 break;
8333         }
8334         return -EOPNOTSUPP;
8335 }
8336
8337 #if TG3_VLAN_TAG_USED
8338 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8339 {
8340         struct tg3 *tp = netdev_priv(dev);
8341
8342         tg3_full_lock(tp, 0);
8343
8344         tp->vlgrp = grp;
8345
8346         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8347         __tg3_set_rx_mode(dev);
8348
8349         tg3_full_unlock(tp);
8350 }
8351
8352 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8353 {
8354         struct tg3 *tp = netdev_priv(dev);
8355
8356         tg3_full_lock(tp, 0);
8357         if (tp->vlgrp)
8358                 tp->vlgrp->vlan_devices[vid] = NULL;
8359         tg3_full_unlock(tp);
8360 }
8361 #endif
8362
8363 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8364 {
8365         struct tg3 *tp = netdev_priv(dev);
8366
8367         memcpy(ec, &tp->coal, sizeof(*ec));
8368         return 0;
8369 }
8370
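     /* Validate ethtool coalescing parameters against the chip limits and
      * copy the relevant fields into tp->coal.  On 5705 and newer chips
      * (TG3_FLG2_5705_PLUS) the IRQ-time and statistics-block tick limits
      * are zero, so those fields must be left at zero there; for both RX
      * and TX at least one of the usecs and max-frames thresholds has to
      * be non-zero or no interrupts would ever be generated.  The new
      * settings are programmed immediately if the interface is running.
      */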
8371 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8372 {
8373         struct tg3 *tp = netdev_priv(dev);
8374         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8375         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8376
8377         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8378                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8379                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8380                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8381                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8382         }
8383
8384         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8385             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8386             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8387             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8388             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8389             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8390             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8391             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8392             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8393             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8394                 return -EINVAL;
8395
8396         /* No rx interrupts will be generated if both are zero */
8397         if ((ec->rx_coalesce_usecs == 0) &&
8398             (ec->rx_max_coalesced_frames == 0))
8399                 return -EINVAL;
8400
8401         /* No tx interrupts will be generated if both are zero */
8402         if ((ec->tx_coalesce_usecs == 0) &&
8403             (ec->tx_max_coalesced_frames == 0))
8404                 return -EINVAL;
8405
8406         /* Only copy relevant parameters, ignore all others. */
8407         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8408         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8409         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8410         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8411         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8412         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8413         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8414         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8415         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8416
8417         if (netif_running(dev)) {
8418                 tg3_full_lock(tp, 0);
8419                 __tg3_set_coalesce(tp, &tp->coal);
8420                 tg3_full_unlock(tp);
8421         }
8422         return 0;
8423 }
8424
8425 static struct ethtool_ops tg3_ethtool_ops = {
8426         .get_settings           = tg3_get_settings,
8427         .set_settings           = tg3_set_settings,
8428         .get_drvinfo            = tg3_get_drvinfo,
8429         .get_regs_len           = tg3_get_regs_len,
8430         .get_regs               = tg3_get_regs,
8431         .get_wol                = tg3_get_wol,
8432         .set_wol                = tg3_set_wol,
8433         .get_msglevel           = tg3_get_msglevel,
8434         .set_msglevel           = tg3_set_msglevel,
8435         .nway_reset             = tg3_nway_reset,
8436         .get_link               = ethtool_op_get_link,
8437         .get_eeprom_len         = tg3_get_eeprom_len,
8438         .get_eeprom             = tg3_get_eeprom,
8439         .set_eeprom             = tg3_set_eeprom,
8440         .get_ringparam          = tg3_get_ringparam,
8441         .set_ringparam          = tg3_set_ringparam,
8442         .get_pauseparam         = tg3_get_pauseparam,
8443         .set_pauseparam         = tg3_set_pauseparam,
8444         .get_rx_csum            = tg3_get_rx_csum,
8445         .set_rx_csum            = tg3_set_rx_csum,
8446         .get_tx_csum            = ethtool_op_get_tx_csum,
8447         .set_tx_csum            = tg3_set_tx_csum,
8448         .get_sg                 = ethtool_op_get_sg,
8449         .set_sg                 = ethtool_op_set_sg,
8450 #if TG3_TSO_SUPPORT != 0
8451         .get_tso                = ethtool_op_get_tso,
8452         .set_tso                = tg3_set_tso,
8453 #endif
8454         .self_test_count        = tg3_get_test_count,
8455         .self_test              = tg3_self_test,
8456         .get_strings            = tg3_get_strings,
8457         .phys_id                = tg3_phys_id,
8458         .get_stats_count        = tg3_get_stats_count,
8459         .get_ethtool_stats      = tg3_get_ethtool_stats,
8460         .get_coalesce           = tg3_get_coalesce,
8461         .set_coalesce           = tg3_set_coalesce,
8462         .get_perm_addr          = ethtool_op_get_perm_addr,
8463 };
8464
8465 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8466 {
8467         u32 cursize, val;
8468
8469         tp->nvram_size = EEPROM_CHIP_SIZE;
8470
8471         if (tg3_nvram_read(tp, 0, &val) != 0)
8472                 return;
8473
8474         if (swab32(val) != TG3_EEPROM_MAGIC)
8475                 return;
8476
8477         /*
8478          * Size the chip by reading offsets at increasing powers of two.
8479          * When we encounter our validation signature, we know the addressing
8480          * has wrapped around, and thus have our chip size.
8481          */
8482         cursize = 0x800;
8483
8484         while (cursize < tp->nvram_size) {
8485                 if (tg3_nvram_read(tp, cursize, &val) != 0)
8486                         return;
8487
8488                 if (swab32(val) == TG3_EEPROM_MAGIC)
8489                         break;
8490
8491                 cursize <<= 1;
8492         }
8493
8494         tp->nvram_size = cursize;
8495 }
8496                 
8497 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8498 {
8499         u32 val;
8500
8501         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8502                 if (val != 0) {
8503                         tp->nvram_size = (val >> 16) * 1024;
8504                         return;
8505                 }
8506         }
8507         tp->nvram_size = 0x20000;
8508 }
8509
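     /* Decode NVRAM_CFG1 to identify the attached NVRAM part.  On 5750 and
      * 5780-class chips the vendor field selects the JEDEC id, page size
      * and whether the part is buffered; everything else defaults to a
      * buffered Atmel AT45DB0X1B.  The flash-interface enable bit also
      * sets TG3_FLG2_FLASH.
      */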
8510 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8511 {
8512         u32 nvcfg1;
8513
8514         nvcfg1 = tr32(NVRAM_CFG1);
8515         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8516                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8517         }
8518         else {
8519                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8520                 tw32(NVRAM_CFG1, nvcfg1);
8521         }
8522
8523         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8524             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8525                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8526                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8527                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8528                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8529                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8530                                 break;
8531                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8532                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8533                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8534                                 break;
8535                         case FLASH_VENDOR_ATMEL_EEPROM:
8536                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8537                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8538                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8539                                 break;
8540                         case FLASH_VENDOR_ST:
8541                                 tp->nvram_jedecnum = JEDEC_ST;
8542                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8543                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8544                                 break;
8545                         case FLASH_VENDOR_SAIFUN:
8546                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8547                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8548                                 break;
8549                         case FLASH_VENDOR_SST_SMALL:
8550                         case FLASH_VENDOR_SST_LARGE:
8551                                 tp->nvram_jedecnum = JEDEC_SST;
8552                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8553                                 break;
8554                 }
8555         }
8556         else {
8557                 tp->nvram_jedecnum = JEDEC_ATMEL;
8558                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8559                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8560         }
8561 }
8562
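     /* 5752 NVRAM detection: bit 27 of NVRAM_CFG1 marks TPM-protected
      * NVRAM, the 5752 vendor field selects Atmel EEPROM/flash or ST
      * M45PEx0 flash, and for flash parts the page size comes from the
      * 5752 page-size field; EEPROMs simply use the maximum chip size.
      */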
8563 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8564 {
8565         u32 nvcfg1;
8566
8567         nvcfg1 = tr32(NVRAM_CFG1);
8568
8569         /* NVRAM protection for TPM */
8570         if (nvcfg1 & (1 << 27))
8571                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8572
8573         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8574                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8575                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8576                         tp->nvram_jedecnum = JEDEC_ATMEL;
8577                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8578                         break;
8579                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8580                         tp->nvram_jedecnum = JEDEC_ATMEL;
8581                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8582                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8583                         break;
8584                 case FLASH_5752VENDOR_ST_M45PE10:
8585                 case FLASH_5752VENDOR_ST_M45PE20:
8586                 case FLASH_5752VENDOR_ST_M45PE40:
8587                         tp->nvram_jedecnum = JEDEC_ST;
8588                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8589                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8590                         break;
8591         }
8592
8593         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8594                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8595                         case FLASH_5752PAGE_SIZE_256:
8596                                 tp->nvram_pagesize = 256;
8597                                 break;
8598                         case FLASH_5752PAGE_SIZE_512:
8599                                 tp->nvram_pagesize = 512;
8600                                 break;
8601                         case FLASH_5752PAGE_SIZE_1K:
8602                                 tp->nvram_pagesize = 1024;
8603                                 break;
8604                         case FLASH_5752PAGE_SIZE_2K:
8605                                 tp->nvram_pagesize = 2048;
8606                                 break;
8607                         case FLASH_5752PAGE_SIZE_4K:
8608                                 tp->nvram_pagesize = 4096;
8609                                 break;
8610                         case FLASH_5752PAGE_SIZE_264:
8611                                 tp->nvram_pagesize = 264;
8612                                 break;
8613                 }
8614         }
8615         else {
8616                 /* For eeprom, set pagesize to maximum eeprom size */
8617                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8618
8619                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8620                 tw32(NVRAM_CFG1, nvcfg1);
8621         }
8622 }
8623
8624 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
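     /* Sun 570X boards skip NVRAM setup entirely.  Otherwise reset the
      * EEPROM state machine, enable automatic SEEPROM access, and on
      * NVRAM-capable chips take the NVRAM lock to probe the part type and
      * size, while 5700/5701 fall back to plain EEPROM sizing.
      */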
8625 static void __devinit tg3_nvram_init(struct tg3 *tp)
8626 {
8627         int j;
8628
8629         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8630                 return;
8631
8632         tw32_f(GRC_EEPROM_ADDR,
8633              (EEPROM_ADDR_FSM_RESET |
8634               (EEPROM_DEFAULT_CLOCK_PERIOD <<
8635                EEPROM_ADDR_CLKPERD_SHIFT)));
8636
8637         /* XXX schedule_timeout() ... */
8638         for (j = 0; j < 100; j++)
8639                 udelay(10);
8640
8641         /* Enable seeprom accesses. */
8642         tw32_f(GRC_LOCAL_CTRL,
8643              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8644         udelay(100);
8645
8646         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8647             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8648                 tp->tg3_flags |= TG3_FLAG_NVRAM;
8649
8650                 if (tg3_nvram_lock(tp)) {
8651                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
8652                                "tg3_nvram_init failed.\n", tp->dev->name);
8653                         return;
8654                 }
8655                 tg3_enable_nvram_access(tp);
8656
8657                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8658                         tg3_get_5752_nvram_info(tp);
8659                 else
8660                         tg3_get_nvram_info(tp);
8661
8662                 tg3_get_nvram_size(tp);
8663
8664                 tg3_disable_nvram_access(tp);
8665                 tg3_nvram_unlock(tp);
8666
8667         } else {
8668                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8669
8670                 tg3_get_eeprom_size(tp);
8671         }
8672 }
8673
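     /* Read one 32-bit word from the legacy serial EEPROM by programming
      * GRC_EEPROM_ADDR with the target offset and polling for
      * EEPROM_ADDR_COMPLETE; used when the chip has no NVRAM interface.
      */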
8674 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8675                                         u32 offset, u32 *val)
8676 {
8677         u32 tmp;
8678         int i;
8679
8680         if (offset > EEPROM_ADDR_ADDR_MASK ||
8681             (offset % 4) != 0)
8682                 return -EINVAL;
8683
8684         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8685                                         EEPROM_ADDR_DEVID_MASK |
8686                                         EEPROM_ADDR_READ);
8687         tw32(GRC_EEPROM_ADDR,
8688              tmp |
8689              (0 << EEPROM_ADDR_DEVID_SHIFT) |
8690              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8691               EEPROM_ADDR_ADDR_MASK) |
8692              EEPROM_ADDR_READ | EEPROM_ADDR_START);
8693
8694         for (i = 0; i < 10000; i++) {
8695                 tmp = tr32(GRC_EEPROM_ADDR);
8696
8697                 if (tmp & EEPROM_ADDR_COMPLETE)
8698                         break;
8699                 udelay(100);
8700         }
8701         if (!(tmp & EEPROM_ADDR_COMPLETE))
8702                 return -EBUSY;
8703
8704         *val = tr32(GRC_EEPROM_DATA);
8705         return 0;
8706 }
8707
8708 #define NVRAM_CMD_TIMEOUT 10000
8709
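     /* Kick a command into the NVRAM controller and busy-wait, in 10 usec
      * steps for up to NVRAM_CMD_TIMEOUT iterations, for NVRAM_CMD_DONE;
      * returns -EBUSY if the controller never signals completion.
      */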
8710 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8711 {
8712         int i;
8713
8714         tw32(NVRAM_CMD, nvram_cmd);
8715         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8716                 udelay(10);
8717                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8718                         udelay(10);
8719                         break;
8720                 }
8721         }
8722         if (i == NVRAM_CMD_TIMEOUT) {
8723                 return -EBUSY;
8724         }
8725         return 0;
8726 }
8727
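     /* Read one 32-bit word of NVRAM.  Legacy parts go through the EEPROM
      * path; buffered Atmel flash first has its linear offset converted to
      * a page/offset address, then the read command runs under the NVRAM
      * lock with access enabled and the raw NVRAM_RDDATA value is
      * byte-swapped with swab32() before being returned.
      */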
8728 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8729 {
8730         int ret;
8731
8732         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8733                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8734                 return -EINVAL;
8735         }
8736
8737         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8738                 return tg3_nvram_read_using_eeprom(tp, offset, val);
8739
8740         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8741                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8742                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8743
8744                 offset = ((offset / tp->nvram_pagesize) <<
8745                           ATMEL_AT45DB0X1B_PAGE_POS) +
8746                         (offset % tp->nvram_pagesize);
8747         }
8748
8749         if (offset > NVRAM_ADDR_MSK)
8750                 return -EINVAL;
8751
8752         ret = tg3_nvram_lock(tp);
8753         if (ret)
8754                 return ret;
8755
8756         tg3_enable_nvram_access(tp);
8757
8758         tw32(NVRAM_ADDR, offset);
8759         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8760                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8761
8762         if (ret == 0)
8763                 *val = swab32(tr32(NVRAM_RDDATA));
8764
8765         tg3_disable_nvram_access(tp);
8766
8767         tg3_nvram_unlock(tp);
8768
8769         return ret;
8770 }
8771
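     /* Write a dword-aligned block through the legacy EEPROM interface one
      * 32-bit word at a time, polling EEPROM_ADDR_COMPLETE after each word
      * and giving up with -EBUSY if a write never completes.
      */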
8772 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8773                                     u32 offset, u32 len, u8 *buf)
8774 {
8775         int i, j, rc = 0;
8776         u32 val;
8777
8778         for (i = 0; i < len; i += 4) {
8779                 u32 addr, data;
8780
8781                 addr = offset + i;
8782
8783                 memcpy(&data, buf + i, 4);
8784
8785                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8786
8787                 val = tr32(GRC_EEPROM_ADDR);
8788                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8789
8790                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8791                         EEPROM_ADDR_READ);
8792                 tw32(GRC_EEPROM_ADDR, val |
8793                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
8794                         (addr & EEPROM_ADDR_ADDR_MASK) |
8795                         EEPROM_ADDR_START |
8796                         EEPROM_ADDR_WRITE);
8797                 
8798                 for (j = 0; j < 10000; j++) {
8799                         val = tr32(GRC_EEPROM_ADDR);
8800
8801                         if (val & EEPROM_ADDR_COMPLETE)
8802                                 break;
8803                         udelay(100);
8804                 }
8805                 if (!(val & EEPROM_ADDR_COMPLETE)) {
8806                         rc = -EBUSY;
8807                         break;
8808                 }
8809         }
8810
8811         return rc;
8812 }
8813
8814 /* offset and length are dword aligned */
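     /* Unbuffered flash can only be erased a page at a time, so each
      * affected page is read into a bounce buffer, merged with the caller's
      * data, erased, and rewritten word by word with NVRAM_CMD_FIRST/LAST
      * framing; a final WRDI command drops write-enable when finished.
      */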
8815 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8816                 u8 *buf)
8817 {
8818         int ret = 0;
8819         u32 pagesize = tp->nvram_pagesize;
8820         u32 pagemask = pagesize - 1;
8821         u32 nvram_cmd;
8822         u8 *tmp;
8823
8824         tmp = kmalloc(pagesize, GFP_KERNEL);
8825         if (tmp == NULL)
8826                 return -ENOMEM;
8827
8828         while (len) {
8829                 int j;
8830                 u32 phy_addr, page_off, size;
8831
8832                 phy_addr = offset & ~pagemask;
8833         
8834                 for (j = 0; j < pagesize; j += 4) {
8835                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
8836                                                 (u32 *) (tmp + j))))
8837                                 break;
8838                 }
8839                 if (ret)
8840                         break;
8841
8842                 page_off = offset & pagemask;
8843                 size = pagesize;
8844                 if (len < size)
8845                         size = len;
8846
8847                 len -= size;
8848
8849                 memcpy(tmp + page_off, buf, size);
8850
8851                 offset = offset + (pagesize - page_off);
8852
8853                 tg3_enable_nvram_access(tp);
8854
8855                 /*
8856                  * Before we can erase the flash page, we need
8857                  * to issue a special "write enable" command.
8858                  */
8859                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8860
8861                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8862                         break;
8863
8864                 /* Erase the target page */
8865                 tw32(NVRAM_ADDR, phy_addr);
8866
8867                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8868                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8869
8870                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8871                         break;
8872
8873                 /* Issue another write enable to start the write. */
8874                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8875
8876                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8877                         break;
8878
8879                 for (j = 0; j < pagesize; j += 4) {
8880                         u32 data;
8881
8882                         data = *((u32 *) (tmp + j));
8883                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
8884
8885                         tw32(NVRAM_ADDR, phy_addr + j);
8886
8887                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
8888                                 NVRAM_CMD_WR;
8889
8890                         if (j == 0)
8891                                 nvram_cmd |= NVRAM_CMD_FIRST;
8892                         else if (j == (pagesize - 4))
8893                                 nvram_cmd |= NVRAM_CMD_LAST;
8894
8895                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8896                                 break;
8897                 }
8898                 if (ret)
8899                         break;
8900         }
8901
8902         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8903         tg3_nvram_exec_cmd(tp, nvram_cmd);
8904
8905         kfree(tmp);
8906
8907         return ret;
8908 }
8909
8910 /* offset and length are dword aligned */
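     /* Buffered flash and EEPROM parts accept word writes directly: each
      * 32-bit word goes out with NVRAM_CMD_FIRST/LAST framing at page
      * boundaries, ST parts get an extra WREN before the first word of a
      * burst, and EEPROMs always use complete single-word writes.
      */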
8911 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8912                 u8 *buf)
8913 {
8914         int i, ret = 0;
8915
8916         for (i = 0; i < len; i += 4, offset += 4) {
8917                 u32 data, page_off, phy_addr, nvram_cmd;
8918
8919                 memcpy(&data, buf + i, 4);
8920                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8921
8922                 page_off = offset % tp->nvram_pagesize;
8923
8924                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8925                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8926
8927                         phy_addr = ((offset / tp->nvram_pagesize) <<
8928                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
8929                 }
8930                 else {
8931                         phy_addr = offset;
8932                 }
8933
8934                 tw32(NVRAM_ADDR, phy_addr);
8935
8936                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
8937
8938                 if ((page_off == 0) || (i == 0))
8939                         nvram_cmd |= NVRAM_CMD_FIRST;
8940                 else if (page_off == (tp->nvram_pagesize - 4))
8941                         nvram_cmd |= NVRAM_CMD_LAST;
8942
8943                 if (i == (len - 4))
8944                         nvram_cmd |= NVRAM_CMD_LAST;
8945
8946                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
8947                     (tp->nvram_jedecnum == JEDEC_ST) &&
8948                     (nvram_cmd & NVRAM_CMD_FIRST)) {
8949
8950                         if ((ret = tg3_nvram_exec_cmd(tp,
8951                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
8952                                 NVRAM_CMD_DONE)))
8953
8954                                 break;
8955                 }
8956                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8957                         /* We always do complete word writes to eeprom. */
8958                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
8959                 }
8960
8961                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8962                         break;
8963         }
8964         return ret;
8965 }
8966
8967 /* offset and length are dword aligned */
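     /* Top-level NVRAM write.  If the part is protected by the write-protect
      * GPIO that line is dropped for the duration; legacy parts use the
      * EEPROM path, while real NVRAM goes through the buffered or
      * unbuffered routine under the NVRAM lock with access enabled and
      * GRC_MODE_NVRAM_WR_ENABLE set.
      */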
8968 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
8969 {
8970         int ret;
8971
8972         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8973                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
8974                 return -EINVAL;
8975         }
8976
8977         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8978                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
8979                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
8980                 udelay(40);
8981         }
8982
8983         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
8984                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
8985         }
8986         else {
8987                 u32 grc_mode;
8988
8989                 ret = tg3_nvram_lock(tp);
8990                 if (ret)
8991                         return ret;
8992
8993                 tg3_enable_nvram_access(tp);
8994                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
8995                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
8996                         tw32(NVRAM_WRITE1, 0x406);
8997
8998                 grc_mode = tr32(GRC_MODE);
8999                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9000
9001                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9002                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9003
9004                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9005                                 buf);
9006                 }
9007                 else {
9008                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9009                                 buf);
9010                 }
9011
9012                 grc_mode = tr32(GRC_MODE);
9013                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9014
9015                 tg3_disable_nvram_access(tp);
9016                 tg3_nvram_unlock(tp);
9017         }
9018
9019         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9020                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9021                 udelay(40);
9022         }
9023
9024         return ret;
9025 }
9026
9027 struct subsys_tbl_ent {
9028         u16 subsys_vendor, subsys_devid;
9029         u32 phy_id;
9030 };
9031
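     /* Known OEM boards (Broadcom, 3Com, Dell, Compaq, IBM) keyed by PCI
      * subsystem vendor/device id, each mapped to its PHY id (zero for
      * entries with no recorded PHY); lookup_by_subsys() below searches
      * this table for the current device's subsystem ids.
      */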
9032 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9033         /* Broadcom boards. */
9034         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9035         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9036         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9037         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
9038         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9039         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9040         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
9041         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9042         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9043         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9044         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9045
9046         /* 3com boards. */
9047         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9048         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9049         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
9050         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9051         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9052
9053         /* DELL boards. */
9054         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9055         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9056         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9057         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9058
9059         /* Compaq boards. */
9060         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9061         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9062         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
9063         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9064         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9065
9066         /* IBM boards. */
9067         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9068 };
9069
9070 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9071 {
9072         int i;
9073
9074         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9075                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9076                      tp->pdev->subsystem_vendor) &&
9077                     (subsys_id_to_phy_id[i].subsys_devid ==
9078                      tp->pdev->subsystem_device))
9079                         return &subsys_id_to_phy_id[i];
9080         }
9081         return NULL;
9082 }
9083
9084 /* Since this function may be called in D3-hot power state during
9085  * tg3_init_one(), only config cycles are allowed.
9086  */
9087 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9088 {
9089         u32 val;
9090
9091         /* Make sure register accesses (indirect or otherwise)
9092          * will function correctly.
9093          */
9094         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9095                                tp->misc_host_ctrl);
9096
9097         tp->phy_id = PHY_ID_INVALID;
9098         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9099
9100         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9101         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9102                 u32 nic_cfg, led_cfg;
9103                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9104                 int eeprom_phy_serdes = 0;
9105
9106                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9107                 tp->nic_sram_data_cfg = nic_cfg;
9108
9109                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9110                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9111                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9112                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9113                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9114                     (ver > 0) && (ver < 0x100))
9115                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9116
9117                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9118                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9119                         eeprom_phy_serdes = 1;
9120
9121                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9122                 if (nic_phy_id != 0) {
9123                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9124                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9125
9126                         eeprom_phy_id  = (id1 >> 16) << 10;
9127                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9128                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9129                 } else
9130                         eeprom_phy_id = 0;
9131
9132                 tp->phy_id = eeprom_phy_id;
9133                 if (eeprom_phy_serdes) {
9134                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9135                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9136                         else
9137                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9138                 }
9139
9140                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9141                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9142                                     SHASTA_EXT_LED_MODE_MASK);
9143                 else
9144                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9145
9146                 switch (led_cfg) {
9147                 default:
9148                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9149                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9150                         break;
9151
9152                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9153                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9154                         break;
9155
9156                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9157                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9158
9159                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9160                          * read on some older 5700/5701 bootcode.
9161                          */
9162                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9163                             ASIC_REV_5700 ||
9164                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9165                             ASIC_REV_5701)
9166                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9167
9168                         break;
9169
9170                 case SHASTA_EXT_LED_SHARED:
9171                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9172                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9173                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9174                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9175                                                  LED_CTRL_MODE_PHY_2);
9176                         break;
9177
9178                 case SHASTA_EXT_LED_MAC:
9179                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9180                         break;
9181
9182                 case SHASTA_EXT_LED_COMBO:
9183                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9184                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9185                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9186                                                  LED_CTRL_MODE_PHY_2);
9187                         break;
9188
9189                 }
9190
9191                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9192                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9193                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9194                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9195
9196                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9197                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9198                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9199                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9200
9201                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9202                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9203                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9204                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9205                 }
9206                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9207                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9208
9209                 if (cfg2 & (1 << 17))
9210                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9211
9212                 /* serdes signal pre-emphasis in register 0x590 is set by
9213                  * the bootcode if bit 18 is set. */
9214                 if (cfg2 & (1 << 18))
9215                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9216         }
9217 }
9218
9219 static int __devinit tg3_phy_probe(struct tg3 *tp)
9220 {
9221         u32 hw_phy_id_1, hw_phy_id_2;
9222         u32 hw_phy_id, hw_phy_id_masked;
9223         int err;
9224
9225         /* Reading the PHY ID register can conflict with ASF
9226          * firmware access to the PHY hardware.
9227          */
9228         err = 0;
9229         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9230                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9231         } else {
9232                 /* Now read the physical PHY_ID from the chip and verify
9233                  * that it is sane.  If it doesn't look good, we fall back
9234                  * to the PHY_ID found in the eeprom area and, failing
9235                  * that, to the hard-coded subsystem ID table.
9236                  */
9237                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9238                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9239
9240                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9241                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9242                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9243
9244                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9245         }
9246
9247         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9248                 tp->phy_id = hw_phy_id;
9249                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9250                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9251                 else
9252                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9253         } else {
9254                 if (tp->phy_id != PHY_ID_INVALID) {
9255                         /* Do nothing, phy ID already set up in
9256                          * tg3_get_eeprom_hw_cfg().
9257                          */
9258                 } else {
9259                         struct subsys_tbl_ent *p;
9260
9261                         /* No eeprom signature?  Try the hardcoded
9262                          * subsys device table.
9263                          */
9264                         p = lookup_by_subsys(tp);
9265                         if (!p)
9266                                 return -ENODEV;
9267
9268                         tp->phy_id = p->phy_id;
9269                         if (!tp->phy_id ||
9270                             tp->phy_id == PHY_ID_BCM8002)
9271                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9272                 }
9273         }
9274
9275         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9276             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9277                 u32 bmsr, adv_reg, tg3_ctrl;
9278
9279                 tg3_readphy(tp, MII_BMSR, &bmsr);
9280                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9281                     (bmsr & BMSR_LSTATUS))
9282                         goto skip_phy_reset;
9283                     
9284                 err = tg3_phy_reset(tp);
9285                 if (err)
9286                         return err;
9287
9288                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9289                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9290                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9291                 tg3_ctrl = 0;
9292                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9293                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9294                                     MII_TG3_CTRL_ADV_1000_FULL);
9295                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9296                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9297                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9298                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9299                 }
9300
9301                 if (!tg3_copper_is_advertising_all(tp)) {
9302                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9303
9304                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9305                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9306
9307                         tg3_writephy(tp, MII_BMCR,
9308                                      BMCR_ANENABLE | BMCR_ANRESTART);
9309                 }
9310                 tg3_phy_set_wirespeed(tp);
9311
9312                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9313                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9314                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9315         }
9316
9317 skip_phy_reset:
9318         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9319                 err = tg3_init_5401phy_dsp(tp);
9320                 if (err)
9321                         return err;
9322         }
9323
9324         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9325                 err = tg3_init_5401phy_dsp(tp);
9326         }
9327
9328         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9329                 tp->link_config.advertising =
9330                         (ADVERTISED_1000baseT_Half |
9331                          ADVERTISED_1000baseT_Full |
9332                          ADVERTISED_Autoneg |
9333                          ADVERTISED_FIBRE);
9334         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9335                 tp->link_config.advertising &=
9336                         ~(ADVERTISED_1000baseT_Half |
9337                           ADVERTISED_1000baseT_Full);
9338
9339         return err;
9340 }
9341
9342 static void __devinit tg3_read_partno(struct tg3 *tp)
9343 {
9344         unsigned char vpd_data[256];
9345         int i;
9346
9347         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9348                 /* Sun decided not to put the necessary bits in the
9349                  * NVRAM of their onboard tg3 parts :(
9350                  */
9351                 strcpy(tp->board_part_number, "Sun 570X");
9352                 return;
9353         }
9354
9355         for (i = 0; i < 256; i += 4) {
9356                 u32 tmp;
9357
9358                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9359                         goto out_not_found;
9360
9361                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9362                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9363                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9364                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9365         }
9366
9367         /* Now parse and find the part number. */
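             /* The layout follows the PCI VPD spec: a large resource tag byte
              * (0x82 identifier string, 0x90 read-only data, 0x91 read-write
              * data) followed by a 16-bit little-endian length.  Skip forward
              * to the read-only section and walk its keywords looking for
              * "PN" (part number).
              */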
9368         for (i = 0; i < 256; ) {
9369                 unsigned char val = vpd_data[i];
9370                 int block_end;
9371
9372                 if (val == 0x82 || val == 0x91) {
9373                         i = (i + 3 +
9374                              (vpd_data[i + 1] +
9375                               (vpd_data[i + 2] << 8)));
9376                         continue;
9377                 }
9378
9379                 if (val != 0x90)
9380                         goto out_not_found;
9381
9382                 block_end = (i + 3 +
9383                              (vpd_data[i + 1] +
9384                               (vpd_data[i + 2] << 8)));
9385                 i += 3;
9386                 while (i < block_end) {
9387                         if (vpd_data[i + 0] == 'P' &&
9388                             vpd_data[i + 1] == 'N') {
9389                                 int partno_len = vpd_data[i + 2];
9390
9391                                 if (partno_len > 24)
9392                                         goto out_not_found;
9393
9394                                 memcpy(tp->board_part_number,
9395                                        &vpd_data[i + 3],
9396                                        partno_len);
9397
9398                                 /* Success. */
9399                                 return;
9400                         }
                             i += 3 + vpd_data[i + 2];
9401                 }
9402
9403                 /* Part number not found. */
9404                 goto out_not_found;
9405         }
9406
9407 out_not_found:
9408         strcpy(tp->board_part_number, "none");
9409 }
9410
9411 #ifdef CONFIG_SPARC64
9412 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9413 {
9414         struct pci_dev *pdev = tp->pdev;
9415         struct pcidev_cookie *pcp = pdev->sysdata;
9416
9417         if (pcp != NULL) {
9418                 int node = pcp->prom_node;
9419                 u32 venid;
9420                 int err;
9421
9422                 err = prom_getproperty(node, "subsystem-vendor-id",
9423                                        (char *) &venid, sizeof(venid));
9424                 if (err == 0 || err == -1)
9425                         return 0;
9426                 if (venid == PCI_VENDOR_ID_SUN)
9427                         return 1;
9428
9429                 /* TG3 chips onboard the SunBlade-2500 don't have the
9430                  * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
9431                  * are distinguishable from non-Sun variants by being
9432                  * named "network" by the firmware.  Non-Sun cards will
9433                  * show up as being named "ethernet".
9434                  */
9435                 if (!strcmp(pcp->prom_name, "network"))
9436                         return 1;
9437         }
9438         return 0;
9439 }
9440 #endif
9441
9442 static int __devinit tg3_get_invariants(struct tg3 *tp)
9443 {
9444         static struct pci_device_id write_reorder_chipsets[] = {
9445                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9446                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9447                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9448                              PCI_DEVICE_ID_VIA_8385_0) },
9449                 { },
9450         };
9451         u32 misc_ctrl_reg;
9452         u32 cacheline_sz_reg;
9453         u32 pci_state_reg, grc_misc_cfg;
9454         u32 val;
9455         u16 pci_cmd;
9456         int err;
9457
9458 #ifdef CONFIG_SPARC64
9459         if (tg3_is_sun_570X(tp))
9460                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9461 #endif
9462
9463         /* Force memory write invalidate off.  If we leave it on,
9464          * then on 5700_BX chips we have to enable a workaround.
9465          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9466          * to match the cacheline size.  The Broadcom driver has this
9467          * workaround but turns MWI off all the time, so it never uses
9468          * it.  This seems to suggest that the workaround is insufficient.
9469          */
9470         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9471         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9472         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9473
9474         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9475          * has the register indirect write enable bit set before
9476          * we try to access any of the MMIO registers.  It is also
9477          * critical that the PCI-X hw workaround situation is decided
9478          * before that as well.
9479          */
9480         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9481                               &misc_ctrl_reg);
9482
9483         tp->pci_chip_rev_id = (misc_ctrl_reg >>
9484                                MISC_HOST_CTRL_CHIPREV_SHIFT);
9485
9486         /* Wrong chip ID in 5752 A0. This code can be removed later
9487          * as A0 is not in production.
9488          */
9489         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9490                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9491
9492         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9493          * we need to disable memory and use config. cycles
9494          * only to access all registers. The 5702/03 chips
9495          * can mistakenly decode the special cycles from the
9496          * ICH chipsets as memory write cycles, causing corruption
9497          * of register and memory space. Only certain ICH bridges
9498          * will drive special cycles with non-zero data during the
9499          * address phase which can fall within the 5703's address
9500          * range. This is not an ICH bug as the PCI spec allows
9501          * non-zero address during special cycles. However, only
9502          * these ICH bridges are known to drive non-zero addresses
9503          * during special cycles.
9504          *
9505          * Since special cycles do not cross PCI bridges, we only
9506          * enable this workaround if the 5703 is on the secondary
9507          * bus of these ICH bridges.
9508          */
9509         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
9510             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
9511                 static struct tg3_dev_id {
9512                         u32     vendor;
9513                         u32     device;
9514                         u32     rev;
9515                 } ich_chipsets[] = {
9516                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
9517                           PCI_ANY_ID },
9518                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
9519                           PCI_ANY_ID },
9520                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
9521                           0xa },
9522                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
9523                           PCI_ANY_ID },
9524                         { },
9525                 };
9526                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
9527                 struct pci_dev *bridge = NULL;
9528
9529                 while (pci_id->vendor != 0) {
9530                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
9531                                                 bridge);
9532                         if (!bridge) {
9533                                 pci_id++;
9534                                 continue;
9535                         }
9536                         if (pci_id->rev != PCI_ANY_ID) {
9537                                 u8 rev;
9538
9539                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
9540                                                      &rev);
9541                                 if (rev > pci_id->rev)
9542                                         continue;
9543                         }
9544                         if (bridge->subordinate &&
9545                             (bridge->subordinate->number ==
9546                              tp->pdev->bus->number)) {
9547
9548                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
9549                                 pci_dev_put(bridge);
9550                                 break;
9551                         }
9552                 }
9553         }
9554
9555         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
9556          * DMA addresses wider than 40 bits.  This bridge may have additional
9557          * 57xx devices behind it in some 4-port NIC designs for example.
9558          * Any tg3 device found behind the bridge will also need the 40-bit
9559          * DMA workaround.
9560          */
9561         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
9562             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9563                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
9564                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
9565                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
9566         }
9567         else {
9568                 struct pci_dev *bridge = NULL;
9569
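                     /* For everything else, look for a ServerWorks EPB bridge
                      * upstream of this device (our bus number falls within the
                      * bridge's secondary..subordinate range); devices behind it
                      * inherit the 40-bit DMA limitation described above.
                      */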
9570                 do {
9571                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
9572                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
9573                                                 bridge);
9574                         if (bridge && bridge->subordinate &&
9575                             (bridge->subordinate->number <=
9576                              tp->pdev->bus->number) &&
9577                             (bridge->subordinate->subordinate >=
9578                              tp->pdev->bus->number)) {
9579                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
9580                                 pci_dev_put(bridge);
9581                                 break;
9582                         }
9583                 } while (bridge);
9584         }
9585
9586         /* Initialize misc host control in PCI block. */
9587         tp->misc_host_ctrl |= (misc_ctrl_reg &
9588                                MISC_HOST_CTRL_CHIPREV);
9589         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9590                                tp->misc_host_ctrl);
9591
9592         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9593                               &cacheline_sz_reg);
9594
9595         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
9596         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
9597         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
9598         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
9599
9600         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
9601             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
9602             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9603                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9604
9605         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
9606             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9607                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9608
9609         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9610                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
9611
9612         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9613             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9614             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
9615                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9616
9617         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9618                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9619
9620         /* If we have an AMD 762 or VIA K8T800 chipset, write
9621          * reordering to the mailbox registers done by the host
9622          * controller can cause major troubles.  We read back from
9623          * every mailbox register write to force the writes to be
9624          * posted to the chip in order.
9625          */
9626         if (pci_dev_present(write_reorder_chipsets) &&
9627             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9628                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9629
9630         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9631             tp->pci_lat_timer < 64) {
9632                 tp->pci_lat_timer = 64;
9633
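                     /* TG3PCI_CACHELINESZ packs the standard PCI cache line
                      * size, latency timer, header type and BIST bytes (see the
                      * parse above); rebuild the dword with the bumped latency
                      * timer and write it back.
                      */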
9634                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
9635                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
9636                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
9637                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
9638
9639                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9640                                        cacheline_sz_reg);
9641         }
9642
9643         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9644                               &pci_state_reg);
9645
9646         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9647                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9648
9649                 /* If this is a 5700 BX chipset, and we are in PCI-X
9650                  * mode, enable register write workaround.
9651                  *
9652                  * The workaround is to use indirect register accesses
9653                  * for all chip writes not to mailbox registers.
9654                  */
9655                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9656                         u32 pm_reg;
9657                         u16 pci_cmd;
9658
9659                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9660
9661                         /* The chip can have its power management PCI config
9662                          * space registers clobbered due to this bug.
9663                          * So explicitly force the chip into D0 here.
9664                          */
9665                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9666                                               &pm_reg);
9667                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9668                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9669                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9670                                                pm_reg);
9671
9672                         /* Also, force SERR#/PERR# in PCI command. */
9673                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9674                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9675                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9676                 }
9677         }
9678
9679         /* 5700 BX chips need to have their TX producer index mailboxes
9680          * written twice to workaround a bug.
9681          */
9682         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9683                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9684
9685         /* Back to back register writes can cause problems on this chip,
9686          * the workaround is to read back all reg writes except those to
9687          * mailbox regs.  See tg3_write_indirect_reg32().
9688          *
9689          * PCI Express 5750_A0 rev chips need this workaround too.
9690          */
9691         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9692             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9693              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9694                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9695
9696         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9697                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9698         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9699                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9700
9701         /* Chip-specific fixup from Broadcom driver */
9702         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9703             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9704                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9705                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9706         }
9707
9708         /* Default fast path register access methods */
9709         tp->read32 = tg3_read32;
9710         tp->write32 = tg3_write32;
9711         tp->read32_mbox = tg3_read32;
9712         tp->write32_mbox = tg3_write32;
9713         tp->write32_tx_mbox = tg3_write32;
9714         tp->write32_rx_mbox = tg3_write32;
9715
9716         /* Various workaround register access methods */
9717         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
9718                 tp->write32 = tg3_write_indirect_reg32;
9719         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
9720                 tp->write32 = tg3_write_flush_reg32;
9721
9722         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
9723             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
9724                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9725                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
9726                         tp->write32_rx_mbox = tg3_write_flush_reg32;
9727         }
9728
9729         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
9730                 tp->read32 = tg3_read_indirect_reg32;
9731                 tp->write32 = tg3_write_indirect_reg32;
9732                 tp->read32_mbox = tg3_read_indirect_mbox;
9733                 tp->write32_mbox = tg3_write_indirect_mbox;
9734                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
9735                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
9736
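                     /* Every access now goes through config space, so the MMIO
                      * mapping is no longer needed.  Drop it and turn off memory
                      * decoding so the chip cannot misdecode ICH special cycles
                      * as memory writes.
                      */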
9737                 iounmap(tp->regs);
9738                 tp->regs = NULL;
9739
9740                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9741                 pci_cmd &= ~PCI_COMMAND_MEMORY;
9742                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9743         }
9744
9745         /* Get eeprom hw config before calling tg3_set_power_state().
9746          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9747          * determined before calling tg3_set_power_state() so that
9748          * we know whether or not to switch out of Vaux power.
9749          * When the flag is set, it means that GPIO1 is used for eeprom
9750          * write protect and also implies that it is a LOM where GPIOs
9751          * are not used to switch power.
9752          */ 
9753         tg3_get_eeprom_hw_cfg(tp);
9754
9755         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9756          * GPIO1 driven high will bring 5700's external PHY out of reset.
9757          * It is also used as eeprom write protect on LOMs.
9758          */
9759         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9760         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9761             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9762                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9763                                        GRC_LCLCTRL_GPIO_OUTPUT1);
9764         /* Unused GPIO3 must be driven as output on 5752 because there
9765          * are no pull-up resistors on unused GPIO pins.
9766          */
9767         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9768                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
9769
9770         /* Force the chip into D0. */
9771         err = tg3_set_power_state(tp, 0);
9772         if (err) {
9773                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9774                        pci_name(tp->pdev));
9775                 return err;
9776         }
9777
9778         /* 5700 B0 chips do not support checksumming correctly due
9779          * to hardware bugs.
9780          */
9781         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9782                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9783
9784         /* Pseudo-header checksum is done by hardware logic and not
9785          * the offload processors, so make the chip do the pseudo-
9786          * header checksums on receive.  For transmit it is more
9787          * convenient to do the pseudo-header checksum in software
9788          * as Linux does that on transmit for us in all cases.
9789          */
9790         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9791         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9792
9793         /* Derive initial jumbo mode from MTU assigned in
9794          * ether_setup() via the alloc_etherdev() call
9795          */
9796         if (tp->dev->mtu > ETH_DATA_LEN &&
9797             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9798                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
9799
9800         /* Determine WakeOnLan speed to use. */
9801         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9802             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9803             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
9804             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
9805                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
9806         } else {
9807                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
9808         }
9809
9810         /* A few boards don't want Ethernet@WireSpeed phy feature */
9811         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9812             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
9813              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
9814              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
9815             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9816                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
9817
9818         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
9819             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
9820                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
9821         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9822                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9823
9824         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
9825                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9826
9827         tp->coalesce_mode = 0;
9828         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
9829             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
9830                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
9831
9832         /* Initialize MAC MI mode, polling disabled. */
9833         tw32_f(MAC_MI_MODE, tp->mi_mode);
9834         udelay(80);
9835
9836         /* Initialize data/descriptor byte/word swapping. */
9837         val = tr32(GRC_MODE);
9838         val &= GRC_MODE_HOST_STACKUP;
9839         tw32(GRC_MODE, val | tp->grc_mode);
9840
9841         tg3_switch_clocks(tp);
9842
9843         /* Clear this out for sanity. */
9844         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9845
9846         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9847                               &pci_state_reg);
9848         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
9849             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
9850                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
9851
9852                 if (chiprevid == CHIPREV_ID_5701_A0 ||
9853                     chiprevid == CHIPREV_ID_5701_B0 ||
9854                     chiprevid == CHIPREV_ID_5701_B2 ||
9855                     chiprevid == CHIPREV_ID_5701_B5) {
9856                         void __iomem *sram_base;
9857
9858                         /* Write some dummy words into the SRAM status block
9859                          * area and see if they read back correctly.  If the return
9860                          * value is bad, force enable the PCIX workaround.
9861                          */
9862                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
9863
9864                         writel(0x00000000, sram_base);
9865                         writel(0x00000000, sram_base + 4);
9866                         writel(0xffffffff, sram_base + 4);
9867                         if (readl(sram_base) != 0x00000000)
9868                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9869                 }
9870         }
9871
9872         udelay(50);
9873         tg3_nvram_init(tp);
9874
9875         grc_misc_cfg = tr32(GRC_MISC_CFG);
9876         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
9877
9878         /* Broadcom's driver says that CIOBE multisplit has a bug */
9879 #if 0
9880         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9881             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
9882                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
9883                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
9884         }
9885 #endif
9886         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9887             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
9888              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
9889                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
9890
9891         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9892             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9893                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9894         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9895                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9896                                       HOSTCC_MODE_CLRTICK_TXBD);
9897
9898                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9899                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9900                                        tp->misc_host_ctrl);
9901         }
9902
9903         /* these are limited to 10/100 only */
9904         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9905              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
9906             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9907              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9908              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
9909               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
9910               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
9911             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9912              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
9913               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
9914                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
9915
9916         err = tg3_phy_probe(tp);
9917         if (err) {
9918                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
9919                        pci_name(tp->pdev), err);
9920                 /* ... but do not return immediately ... */
9921         }
9922
9923         tg3_read_partno(tp);
9924
9925         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
9926                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9927         } else {
9928                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9929                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
9930                 else
9931                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9932         }
9933
9934         /* 5700 {AX,BX} chips have a broken status block link
9935          * change bit implementation, so we must use the
9936          * status register in those cases.
9937          */
9938         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9939                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
9940         else
9941                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
9942
9943         /* The led_ctrl is set during tg3_phy_probe(); here we might
9944          * have to force the link status polling mechanism based
9945          * upon subsystem IDs.
9946          */
9947         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
9948             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9949                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
9950                                   TG3_FLAG_USE_LINKCHG_REG);
9951         }
9952
9953         /* For all SERDES we poll the MAC status register. */
9954         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9955                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
9956         else
9957                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9958
9959         /* It seems all chips can get confused if TX buffers
9960          * straddle the 4GB address boundary in some cases.
9961          */
9962         tp->dev->hard_start_xmit = tg3_start_xmit;
9963
9964         tp->rx_offset = 2;
9965         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
9966             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
9967                 tp->rx_offset = 0;
9968
9969         /* By default, disable wake-on-lan.  User can change this
9970          * using ETHTOOL_SWOL.
9971          */
9972         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9973
9974         return err;
9975 }
9976
9977 #ifdef CONFIG_SPARC64
9978 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
9979 {
9980         struct net_device *dev = tp->dev;
9981         struct pci_dev *pdev = tp->pdev;
9982         struct pcidev_cookie *pcp = pdev->sysdata;
9983
9984         if (pcp != NULL) {
9985                 int node = pcp->prom_node;
9986
9987                 if (prom_getproplen(node, "local-mac-address") == 6) {
9988                         prom_getproperty(node, "local-mac-address",
9989                                          dev->dev_addr, 6);
9990                         memcpy(dev->perm_addr, dev->dev_addr, 6);
9991                         return 0;
9992                 }
9993         }
9994         return -ENODEV;
9995 }
9996
9997 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
9998 {
9999         struct net_device *dev = tp->dev;
10000
10001         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10002         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10003         return 0;
10004 }
10005 #endif
10006
10007 static int __devinit tg3_get_device_address(struct tg3 *tp)
10008 {
10009         struct net_device *dev = tp->dev;
10010         u32 hi, lo, mac_offset;
10011
10012 #ifdef CONFIG_SPARC64
10013         if (!tg3_get_macaddr_sparc(tp))
10014                 return 0;
10015 #endif
10016
10017         mac_offset = 0x7c;
10018         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10019              !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) ||
10020             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10021                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10022                         mac_offset = 0xcc;
10023                 if (tg3_nvram_lock(tp))
10024                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10025                 else
10026                         tg3_nvram_unlock(tp);
10027         }
10028
10029         /* First try to get it from MAC address mailbox. */
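              /* A populated mailbox carries 0x484b (ASCII "HK") in the upper
               * 16 bits of the high word; the six MAC address bytes follow in
               * the remaining bytes of the high and low words.
               */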
10030         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
10031         if ((hi >> 16) == 0x484b) {
10032                 dev->dev_addr[0] = (hi >>  8) & 0xff;
10033                 dev->dev_addr[1] = (hi >>  0) & 0xff;
10034
10035                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10036                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10037                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10038                 dev->dev_addr[4] = (lo >>  8) & 0xff;
10039                 dev->dev_addr[5] = (lo >>  0) & 0xff;
10040         }
10041         /* Next, try NVRAM. */
10042         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
10043                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10044                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10045                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
10046                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
10047                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
10048                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
10049                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
10050                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
10051         }
10052         /* Finally just fetch it out of the MAC control regs. */
10053         else {
10054                 hi = tr32(MAC_ADDR_0_HIGH);
10055                 lo = tr32(MAC_ADDR_0_LOW);
10056
10057                 dev->dev_addr[5] = lo & 0xff;
10058                 dev->dev_addr[4] = (lo >> 8) & 0xff;
10059                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10060                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10061                 dev->dev_addr[1] = hi & 0xff;
10062                 dev->dev_addr[0] = (hi >> 8) & 0xff;
10063         }
10064
10065         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10066 #ifdef CONFIG_SPARC64
10067                 if (!tg3_get_default_macaddr_sparc(tp))
10068                         return 0;
10069 #endif
10070                 return -EINVAL;
10071         }
10072         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10073         return 0;
10074 }
10075
10076 #define BOUNDARY_SINGLE_CACHELINE       1
10077 #define BOUNDARY_MULTI_CACHELINE        2
10078
10079 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10080 {
10081         int cacheline_size;
10082         u8 byte;
10083         int goal;
10084
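              /* PCI_CACHE_LINE_SIZE is programmed in units of 32-bit words; a
               * value of zero typically means firmware never set it, so assume
               * the worst-case 1024-byte boundary.
               */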
10085         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10086         if (byte == 0)
10087                 cacheline_size = 1024;
10088         else
10089                 cacheline_size = (int) byte * 4;
10090
10091         /* On 5703 and later chips, the boundary bits have no
10092          * effect.
10093          */
10094         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10095             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10096             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10097                 goto out;
10098
10099 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10100         goal = BOUNDARY_MULTI_CACHELINE;
10101 #else
10102 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10103         goal = BOUNDARY_SINGLE_CACHELINE;
10104 #else
10105         goal = 0;
10106 #endif
10107 #endif
10108
10109         if (!goal)
10110                 goto out;
10111
10112         /* PCI controllers on most RISC systems tend to disconnect
10113          * when a device tries to burst across a cache-line boundary.
10114          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10115          *
10116          * Unfortunately, for PCI-E there are only limited
10117          * write-side controls for this, and thus for reads
10118          * we will still get the disconnects.  We'll also waste
10119          * these PCI cycles for both read and write for chips
10120          * other than 5700 and 5701 which do not implement the
10121          * boundary bits.
10122          */
10123         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10124             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10125                 switch (cacheline_size) {
10126                 case 16:
10127                 case 32:
10128                 case 64:
10129                 case 128:
10130                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10131                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10132                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10133                         } else {
10134                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10135                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10136                         }
10137                         break;
10138
10139                 case 256:
10140                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10141                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10142                         break;
10143
10144                 default:
10145                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10146                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10147                         break;
10148                 }
10149         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10150                 switch (cacheline_size) {
10151                 case 16:
10152                 case 32:
10153                 case 64:
10154                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10155                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10156                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10157                                 break;
10158                         }
10159                         /* fallthrough */
10160                 case 128:
10161                 default:
10162                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10163                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10164                         break;
10165                 }
10166         } else {
10167                 switch (cacheline_size) {
10168                 case 16:
10169                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10170                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10171                                         DMA_RWCTRL_WRITE_BNDRY_16);
10172                                 break;
10173                         }
10174                         /* fallthrough */
10175                 case 32:
10176                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10177                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10178                                         DMA_RWCTRL_WRITE_BNDRY_32);
10179                                 break;
10180                         }
10181                         /* fallthrough */
10182                 case 64:
10183                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10184                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10185                                         DMA_RWCTRL_WRITE_BNDRY_64);
10186                                 break;
10187                         }
10188                         /* fallthrough */
10189                 case 128:
10190                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10191                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10192                                         DMA_RWCTRL_WRITE_BNDRY_128);
10193                                 break;
10194                         }
10195                         /* fallthrough */
10196                 case 256:
10197                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10198                                 DMA_RWCTRL_WRITE_BNDRY_256);
10199                         break;
10200                 case 512:
10201                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10202                                 DMA_RWCTRL_WRITE_BNDRY_512);
10203                         break;
10204                 case 1024:
10205                 default:
10206                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10207                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10208                         break;
10209                 }
10210         }
10211
10212 out:
10213         return val;
10214 }
10215
10216 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10217 {
10218         struct tg3_internal_buffer_desc test_desc;
10219         u32 sram_dma_descs;
10220         int i, ret;
10221
10222         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10223
10224         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10225         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10226         tw32(RDMAC_STATUS, 0);
10227         tw32(WDMAC_STATUS, 0);
10228
10229         tw32(BUFMGR_MODE, 0);
10230         tw32(FTQ_RESET, 0);
10231
10232         test_desc.addr_hi = ((u64) buf_dma) >> 32;
10233         test_desc.addr_lo = buf_dma & 0xffffffff;
10234         test_desc.nic_mbuf = 0x00002100;
10235         test_desc.len = size;
10236
10237         /*
10238          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
10239          * the *second* time the tg3 driver was getting loaded after an
10240          * initial scan.
10241          *
10242          * Broadcom tells me:
10243          *   ...the DMA engine is connected to the GRC block and a DMA
10244          *   reset may affect the GRC block in some unpredictable way...
10245          *   The behavior of resets to individual blocks has not been tested.
10246          *
10247          * Broadcom noted the GRC reset will also reset all sub-components.
10248          */
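              /* to_device means host memory -> NIC SRAM, which goes through
               * the read DMA engine (RDMAC); the opposite direction uses the
               * write DMA engine (WDMAC).
               */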
10249         if (to_device) {
10250                 test_desc.cqid_sqid = (13 << 8) | 2;
10251
10252                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10253                 udelay(40);
10254         } else {
10255                 test_desc.cqid_sqid = (16 << 8) | 7;
10256
10257                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10258                 udelay(40);
10259         }
10260         test_desc.flags = 0x00000005;
10261
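              /* Copy the test descriptor into NIC SRAM one 32-bit word at a
               * time through the PCI memory window registers, which are
               * reachable purely via config space.
               */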
10262         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10263                 u32 val;
10264
10265                 val = *(((u32 *)&test_desc) + i);
10266                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10267                                        sram_dma_descs + (i * sizeof(u32)));
10268                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10269         }
10270         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10271
10272         if (to_device) {
10273                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10274         } else {
10275                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10276         }
10277
10278         ret = -ENODEV;
10279         for (i = 0; i < 40; i++) {
10280                 u32 val;
10281
10282                 if (to_device)
10283                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10284                 else
10285                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10286                 if ((val & 0xffff) == sram_dma_descs) {
10287                         ret = 0;
10288                         break;
10289                 }
10290
10291                 udelay(100);
10292         }
10293
10294         return ret;
10295 }
10296
10297 #define TEST_BUFFER_SIZE        0x2000
10298
10299 static int __devinit tg3_test_dma(struct tg3 *tp)
10300 {
10301         dma_addr_t buf_dma;
10302         u32 *buf, saved_dma_rwctrl;
10303         int ret;
10304
10305         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10306         if (!buf) {
10307                 ret = -ENOMEM;
10308                 goto out_nofree;
10309         }
10310
10311         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10312                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10313
10314         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
10315
10316         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10317                 /* DMA read watermark not used on PCIE */
10318                 tp->dma_rwctrl |= 0x00180000;
10319         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10320                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10321                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10322                         tp->dma_rwctrl |= 0x003f0000;
10323                 else
10324                         tp->dma_rwctrl |= 0x003f000f;
10325         } else {
10326                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10327                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10328                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10329
10330                         /* If the 5704 is behind the EPB bridge, we can
10331                          * do the less restrictive ONE_DMA workaround for
10332                          * better performance.
10333                          */
10334                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
10335                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10336                                 tp->dma_rwctrl |= 0x8000;
10337                         else if (ccval == 0x6 || ccval == 0x7)
10338                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10339
10340                         /* Set bit 23 to enable PCIX hw bug fix */
10341                         tp->dma_rwctrl |= 0x009f0000;
10342                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10343                         /* 5780 always in PCIX mode */
10344                         tp->dma_rwctrl |= 0x00144000;
10345                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10346                         /* 5714 always in PCIX mode */
10347                         tp->dma_rwctrl |= 0x00148000;
10348                 } else {
10349                         tp->dma_rwctrl |= 0x001b000f;
10350                 }
10351         }
10352
10353         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10354             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10355                 tp->dma_rwctrl &= 0xfffffff0;
10356
10357         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10358             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10359                 /* Remove this if it causes problems for some boards. */
10360                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10361
10362                 /* On 5700/5701 chips, we need to set this bit.
10363                  * Otherwise the chip will issue cacheline transactions
10364                  * to streamable DMA memory without all of the byte
10365                  * enables turned on.  This is an error on several
10366                  * RISC PCI controllers, in particular sparc64.
10367                  *
10368                  * On 5703/5704 chips, this bit has been reassigned
10369                  * a different meaning.  In particular, it is used
10370                  * on those chips to enable a PCI-X workaround.
10371                  */
10372                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10373         }
10374
10375         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10376
10377 #if 0
10378         /* Unneeded, already done by tg3_get_invariants.  */
10379         tg3_switch_clocks(tp);
10380 #endif
10381
10382         ret = 0;
10383         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10384             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10385                 goto out;
10386
10387         /* It is best to perform DMA test with maximum write burst size
10388          * to expose the 5700/5701 write DMA bug.
10389          */
10390         saved_dma_rwctrl = tp->dma_rwctrl;
10391         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10392         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10393
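              /* The loop below fills the test buffer with a known pattern,
               * DMAs it to on-chip memory and back, and verifies the result.
               * On a mismatch the write boundary is forced down to 16 bytes
               * and the test is retried once; a second mismatch means the
               * DMA engine cannot be used and the probe fails with -ENODEV.
               */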
10394         while (1) {
10395                 u32 *p = buf, i;
10396
10397                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10398                         p[i] = i;
10399
10400                 /* Send the buffer to the chip. */
10401                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10402                 if (ret) {
10403                         printk(KERN_ERR "tg3_test_dma() write to test buffer failed, err = %d\n", ret);
10404                         break;
10405                 }
10406
10407 #if 0
10408                 /* validate data reached card RAM correctly. */
10409                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10410                         u32 val;
10411                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10412                         if (le32_to_cpu(val) != p[i]) {
10413                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", le32_to_cpu(val), i);
10414                                 /* ret = -ENODEV here? */
10415                         }
10416                         p[i] = 0;
10417                 }
10418 #endif
10419                 /* Now read it back. */
10420                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10421                 if (ret) {
10422                         printk(KERN_ERR "tg3_test_dma() read of test buffer failed, err = %d\n", ret);
10423
10424                         break;
10425                 }
10426
10427                 /* Verify it. */
10428                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10429                         if (p[i] == i)
10430                                 continue;
10431
10432                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10433                             DMA_RWCTRL_WRITE_BNDRY_16) {
10434                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10435                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10436                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10437                                 break;
10438                         } else {
10439                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10440                                 ret = -ENODEV;
10441                                 goto out;
10442                         }
10443                 }
10444
10445                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10446                         /* Success. */
10447                         ret = 0;
10448                         break;
10449                 }
10450         }
10451         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10452             DMA_RWCTRL_WRITE_BNDRY_16) {
10453                 static struct pci_device_id dma_wait_state_chipsets[] = {
10454                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10455                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10456                         { },
10457                 };
10458
10459                 /* DMA test passed without adjusting DMA boundary,
10460                  * now look for chipsets that are known to expose the
10461                  * DMA bug without failing the test.
10462                  */
10463                 if (pci_dev_present(dma_wait_state_chipsets)) {
10464                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10465                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10466                 } else {
10467                         /* Safe to use the calculated DMA boundary. */
10468                         tp->dma_rwctrl = saved_dma_rwctrl;
10469                 }
10470
10471                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10472         }
10473
10474 out:
10475         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
10476 out_nofree:
10477         return ret;
10478 }
10479
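      /* Establish the default link configuration: advertise every 10/100/1000
       * half- and full-duplex mode with autonegotiation enabled, and mark the
       * active and original speed/duplex as invalid (and carrier off) until a
       * link has actually been negotiated.
       */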
10480 static void __devinit tg3_init_link_config(struct tg3 *tp)
10481 {
10482         tp->link_config.advertising =
10483                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10484                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10485                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
10486                  ADVERTISED_Autoneg | ADVERTISED_MII);
10487         tp->link_config.speed = SPEED_INVALID;
10488         tp->link_config.duplex = DUPLEX_INVALID;
10489         tp->link_config.autoneg = AUTONEG_ENABLE;
10490         netif_carrier_off(tp->dev);
10491         tp->link_config.active_speed = SPEED_INVALID;
10492         tp->link_config.active_duplex = DUPLEX_INVALID;
10493         tp->link_config.phy_is_low_power = 0;
10494         tp->link_config.orig_speed = SPEED_INVALID;
10495         tp->link_config.orig_duplex = DUPLEX_INVALID;
10496         tp->link_config.orig_autoneg = AUTONEG_INVALID;
10497 }
10498
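      /* Pick buffer-manager MBUF watermarks: 5705 and newer chips use the
       * reduced *_5705/*_JUMBO_5780 defaults, older chips use the original
       * defaults.  The DMA low/high watermarks are the same for all chips.
       */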
10499 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
10500 {
10501         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10502                 tp->bufmgr_config.mbuf_read_dma_low_water =
10503                         DEFAULT_MB_RDMA_LOW_WATER_5705;
10504                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10505                         DEFAULT_MB_MACRX_LOW_WATER_5705;
10506                 tp->bufmgr_config.mbuf_high_water =
10507                         DEFAULT_MB_HIGH_WATER_5705;
10508
10509                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10510                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
10511                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10512                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
10513                 tp->bufmgr_config.mbuf_high_water_jumbo =
10514                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
10515         } else {
10516                 tp->bufmgr_config.mbuf_read_dma_low_water =
10517                         DEFAULT_MB_RDMA_LOW_WATER;
10518                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10519                         DEFAULT_MB_MACRX_LOW_WATER;
10520                 tp->bufmgr_config.mbuf_high_water =
10521                         DEFAULT_MB_HIGH_WATER;
10522
10523                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10524                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
10525                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10526                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
10527                 tp->bufmgr_config.mbuf_high_water_jumbo =
10528                         DEFAULT_MB_HIGH_WATER_JUMBO;
10529         }
10530
10531         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
10532         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
10533 }
10534
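      /* Map the probed PHY ID to a printable name for the probe banner
       * printed by tg3_init_one() below.
       */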
10535 static char * __devinit tg3_phy_string(struct tg3 *tp)
10536 {
10537         switch (tp->phy_id & PHY_ID_MASK) {
10538         case PHY_ID_BCM5400:    return "5400";
10539         case PHY_ID_BCM5401:    return "5401";
10540         case PHY_ID_BCM5411:    return "5411";
10541         case PHY_ID_BCM5701:    return "5701";
10542         case PHY_ID_BCM5703:    return "5703";
10543         case PHY_ID_BCM5704:    return "5704";
10544         case PHY_ID_BCM5705:    return "5705";
10545         case PHY_ID_BCM5750:    return "5750";
10546         case PHY_ID_BCM5752:    return "5752";
10547         case PHY_ID_BCM5714:    return "5714";
10548         case PHY_ID_BCM5780:    return "5780";
10549         case PHY_ID_BCM8002:    return "8002/serdes";
10550         case 0:                 return "serdes";
10551         default:                return "unknown";
10552         }
10553 }
10554
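      /* Format a human-readable bus description (type, clock, width) into the
       * caller-supplied buffer, e.g. "PCI Express", "PCIX:133MHz:64-bit" or
       * "PCI:66MHz:32-bit".  For PCI-X the clock is decoded from CLOCK_CTRL.
       */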
10555 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
10556 {
10557         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10558                 strcpy(str, "PCI Express");
10559                 return str;
10560         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
10561                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
10562
10563                 strcpy(str, "PCIX:");
10564
10565                 if ((clock_ctrl == 7) ||
10566                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
10567                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
10568                         strcat(str, "133MHz");
10569                 else if (clock_ctrl == 0)
10570                         strcat(str, "33MHz");
10571                 else if (clock_ctrl == 2)
10572                         strcat(str, "50MHz");
10573                 else if (clock_ctrl == 4)
10574                         strcat(str, "66MHz");
10575                 else if (clock_ctrl == 6)
10576                         strcat(str, "100MHz");
10577         } else {
10578                 strcpy(str, "PCI:");
10579                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
10580                         strcat(str, "66MHz");
10581                 else
10582                         strcat(str, "33MHz");
10583         }
10584         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
10585                 strcat(str, ":32-bit");
10586         else
10587                 strcat(str, ":64-bit");
10588         return str;
10589 }
10590
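      /* The 5704 and 5714 are dual-port parts implemented as two PCI functions
       * in the same slot.  Locate the sibling function so it can be recorded
       * in tp->pdev_peer; if none is found (single-port configuration) the
       * device is its own peer.
       */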
10591 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
10592 {
10593         struct pci_dev *peer;
10594         unsigned int func, devnr = tp->pdev->devfn & ~7;
10595
10596         for (func = 0; func < 8; func++) {
10597                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
10598                 if (peer && peer != tp->pdev)
10599                         break;
10600                 pci_dev_put(peer);
10601         }
10602         /* 5704 can be configured in single-port mode, set peer to
10603          * tp->pdev in that case.
10604          */
10605         if (!peer) {
10606                 peer = tp->pdev;
10607                 return peer;
10608         }
10609
10610         /*
10611          * We don't need to keep the refcount elevated; there's no way
10612          * to remove one half of this device without removing the other
10613          */
10614         pci_dev_put(peer);
10615
10616         return peer;
10617 }
10618
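      /* Seed the default ethtool coalescing parameters.  Chips whose host
       * coalescing engine clears ticks on BD events get the *_CLRTCKS
       * variants, and on 5705 and newer chips the per-interrupt and
       * statistics-block coalescing values are zeroed out.
       */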
10619 static void __devinit tg3_init_coal(struct tg3 *tp)
10620 {
10621         struct ethtool_coalesce *ec = &tp->coal;
10622
10623         memset(ec, 0, sizeof(*ec));
10624         ec->cmd = ETHTOOL_GCOALESCE;
10625         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
10626         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
10627         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
10628         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
10629         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
10630         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
10631         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
10632         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
10633         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
10634
10635         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
10636                                  HOSTCC_MODE_CLRTICK_TXBD)) {
10637                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
10638                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
10639                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
10640                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
10641         }
10642
10643         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10644                 ec->rx_coalesce_usecs_irq = 0;
10645                 ec->tx_coalesce_usecs_irq = 0;
10646                 ec->stats_block_coalesce_usecs = 0;
10647         }
10648 }
10649
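      /* PCI probe entry point: enable the device, map BAR 0, fill in the
       * net_device methods, read the chip invariants and MAC address, run the
       * DMA engine test and finally register the netdev.  Failures unwind
       * through the err_out_* labels at the bottom of the function.
       */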
10650 static int __devinit tg3_init_one(struct pci_dev *pdev,
10651                                   const struct pci_device_id *ent)
10652 {
10653         static int tg3_version_printed = 0;
10654         unsigned long tg3reg_base, tg3reg_len;
10655         struct net_device *dev;
10656         struct tg3 *tp;
10657         int i, err, pm_cap;
10658         char str[40];
10659         u64 dma_mask, persist_dma_mask;
10660
10661         if (tg3_version_printed++ == 0)
10662                 printk(KERN_INFO "%s", version);
10663
10664         err = pci_enable_device(pdev);
10665         if (err) {
10666                 printk(KERN_ERR PFX "Cannot enable PCI device, "
10667                        "aborting.\n");
10668                 return err;
10669         }
10670
10671         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10672                 printk(KERN_ERR PFX "Cannot find proper PCI device "
10673                        "base address, aborting.\n");
10674                 err = -ENODEV;
10675                 goto err_out_disable_pdev;
10676         }
10677
10678         err = pci_request_regions(pdev, DRV_MODULE_NAME);
10679         if (err) {
10680                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
10681                        "aborting.\n");
10682                 goto err_out_disable_pdev;
10683         }
10684
10685         pci_set_master(pdev);
10686
10687         /* Find power-management capability. */
10688         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10689         if (pm_cap == 0) {
10690                 printk(KERN_ERR PFX "Cannot find Power Management capability, "
10691                        "aborting.\n");
10692                 err = -EIO;
10693                 goto err_out_free_res;
10694         }
10695
10696         tg3reg_base = pci_resource_start(pdev, 0);
10697         tg3reg_len = pci_resource_len(pdev, 0);
10698
10699         dev = alloc_etherdev(sizeof(*tp));
10700         if (!dev) {
10701                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
10702                 err = -ENOMEM;
10703                 goto err_out_free_res;
10704         }
10705
10706         SET_MODULE_OWNER(dev);
10707         SET_NETDEV_DEV(dev, &pdev->dev);
10708
10709         dev->features |= NETIF_F_LLTX;
10710 #if TG3_VLAN_TAG_USED
10711         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
10712         dev->vlan_rx_register = tg3_vlan_rx_register;
10713         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
10714 #endif
10715
10716         tp = netdev_priv(dev);
10717         tp->pdev = pdev;
10718         tp->dev = dev;
10719         tp->pm_cap = pm_cap;
10720         tp->mac_mode = TG3_DEF_MAC_MODE;
10721         tp->rx_mode = TG3_DEF_RX_MODE;
10722         tp->tx_mode = TG3_DEF_TX_MODE;
10723         tp->mi_mode = MAC_MI_MODE_BASE;
10724         if (tg3_debug > 0)
10725                 tp->msg_enable = tg3_debug;
10726         else
10727                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
10728
10729         /* The word/byte swap controls here control register access byte
10730          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
10731          * setting below.
10732          */
10733         tp->misc_host_ctrl =
10734                 MISC_HOST_CTRL_MASK_PCI_INT |
10735                 MISC_HOST_CTRL_WORD_SWAP |
10736                 MISC_HOST_CTRL_INDIR_ACCESS |
10737                 MISC_HOST_CTRL_PCISTATE_RW;
10738
10739         /* The NONFRM (non-frame) byte/word swap controls take effect
10740          * on descriptor entries, anything which isn't packet data.
10741          *
10742          * The StrongARM chips on the board (one for tx, one for rx)
10743          * are running in big-endian mode.
10744          */
10745         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
10746                         GRC_MODE_WSWAP_NONFRM_DATA);
10747 #ifdef __BIG_ENDIAN
10748         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
10749 #endif
10750         spin_lock_init(&tp->lock);
10751         spin_lock_init(&tp->tx_lock);
10752         spin_lock_init(&tp->indirect_lock);
10753         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
10754
10755         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
10756         if (!tp->regs) {
10757                 printk(KERN_ERR PFX "Cannot map device registers, "
10758                        "aborting.\n");
10759                 err = -ENOMEM;
10760                 goto err_out_free_dev;
10761         }
10762
10763         tg3_init_link_config(tp);
10764
10765         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
10766         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
10767         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
10768
10769         dev->open = tg3_open;
10770         dev->stop = tg3_close;
10771         dev->get_stats = tg3_get_stats;
10772         dev->set_multicast_list = tg3_set_rx_mode;
10773         dev->set_mac_address = tg3_set_mac_addr;
10774         dev->do_ioctl = tg3_ioctl;
10775         dev->tx_timeout = tg3_tx_timeout;
10776         dev->poll = tg3_poll;
10777         dev->ethtool_ops = &tg3_ethtool_ops;
10778         dev->weight = 64;
10779         dev->watchdog_timeo = TG3_TX_TIMEOUT;
10780         dev->change_mtu = tg3_change_mtu;
10781         dev->irq = pdev->irq;
10782 #ifdef CONFIG_NET_POLL_CONTROLLER
10783         dev->poll_controller = tg3_poll_controller;
10784 #endif
10785
10786         err = tg3_get_invariants(tp);
10787         if (err) {
10788                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
10789                        "aborting.\n");
10790                 goto err_out_iounmap;
10791         }
10792
10793         /* The EPB bridge inside 5714, 5715, and 5780 and any
10794          * device behind the EPB cannot support DMA addresses > 40-bit.
10795          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
10796          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
10797          * do DMA address check in tg3_start_xmit().
10798          */
10799         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
10800                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
10801         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
10802                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
10803 #ifdef CONFIG_HIGHMEM
10804                 dma_mask = DMA_64BIT_MASK;
10805 #endif
10806         } else
10807                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
10808
10809         /* Configure DMA attributes. */
10810         if (dma_mask > DMA_32BIT_MASK) {
10811                 err = pci_set_dma_mask(pdev, dma_mask);
10812                 if (!err) {
10813                         dev->features |= NETIF_F_HIGHDMA;
10814                         err = pci_set_consistent_dma_mask(pdev,
10815                                                           persist_dma_mask);
10816                         if (err < 0) {
10817                                 printk(KERN_ERR PFX "Unable to obtain 64-bit "
10818                                        "DMA for consistent allocations\n");
10819                                 goto err_out_iounmap;
10820                         }
10821                 }
10822         }
10823         if (err || dma_mask == DMA_32BIT_MASK) {
10824                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
10825                 if (err) {
10826                         printk(KERN_ERR PFX "No usable DMA configuration, "
10827                                "aborting.\n");
10828                         goto err_out_iounmap;
10829                 }
10830         }
10831
10832         tg3_init_bufmgr_config(tp);
10833
10834 #if TG3_TSO_SUPPORT != 0
10835         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
10836                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10837         }
10838         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10839             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10840             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
10841             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
10842                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
10843         } else {
10844                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10845         }
10846
10847         /* TSO is off by default, user can enable using ethtool.  */
10848 #if 0
10849         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
10850                 dev->features |= NETIF_F_TSO;
10851 #endif
10852
10853 #endif
10854
10855         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
10856             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
10857             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
10858                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
10859                 tp->rx_pending = 63;
10860         }
10861
10862         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10863             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
10864                 tp->pdev_peer = tg3_find_peer(tp);
10865
10866         err = tg3_get_device_address(tp);
10867         if (err) {
10868                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
10869                        "aborting.\n");
10870                 goto err_out_iounmap;
10871         }
10872
10873         /*
10874          * Reset the chip in case a UNDI or EFI driver did not shut it down.
10875          * The DMA self test below enables the WDMAC, and we would otherwise
10876          * see (spurious) pending DMA on the PCI bus at that point.
10877          */
10878         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
10879             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10880                 pci_save_state(tp->pdev);
10881                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
10882                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10883         }
10884
10885         err = tg3_test_dma(tp);
10886         if (err) {
10887                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
10888                 goto err_out_iounmap;
10889         }
10890
10891         /* The Tigon3 can only checksum IPv4 packets, and some chips have
10892          * buggy checksum engines.
10893          */
10894         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
10895                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
10896                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10897         } else
10898                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10899
10900         /* flow control autonegotiation is default behavior */
10901         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10902
10903         tg3_init_coal(tp);
10904
10905         /* Now that we have fully setup the chip, save away a snapshot
10906          * of the PCI config space.  We need to restore this after
10907          * GRC_MISC_CFG core clock resets and some resume events.
10908          */
10909         pci_save_state(tp->pdev);
10910
10911         err = register_netdev(dev);
10912         if (err) {
10913                 printk(KERN_ERR PFX "Cannot register net device, "
10914                        "aborting.\n");
10915                 goto err_out_iounmap;
10916         }
10917
10918         pci_set_drvdata(pdev, dev);
10919
10920         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
10921                dev->name,
10922                tp->board_part_number,
10923                tp->pci_chip_rev_id,
10924                tg3_phy_string(tp),
10925                tg3_bus_string(tp, str),
10926                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
10927
10928         for (i = 0; i < 6; i++)
10929                 printk("%2.2x%c", dev->dev_addr[i],
10930                        i == 5 ? '\n' : ':');
10931
10932         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
10933                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
10934                "TSOcap[%d]\n",
10935                dev->name,
10936                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
10937                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
10938                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
10939                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
10940                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
10941                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
10942                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
10943         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
10944                dev->name, tp->dma_rwctrl,
10945                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
10946                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
10947
10948         return 0;
10949
10950 err_out_iounmap:
10951         if (tp->regs) {
10952                 iounmap(tp->regs);
10953                 tp->regs = NULL;
10954         }
10955
10956 err_out_free_dev:
10957         free_netdev(dev);
10958
10959 err_out_free_res:
10960         pci_release_regions(pdev);
10961
10962 err_out_disable_pdev:
10963         pci_disable_device(pdev);
10964         pci_set_drvdata(pdev, NULL);
10965         return err;
10966 }
10967
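      /* PCI remove: flush any pending reset work, then tear down everything
       * tg3_init_one() set up.
       */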
10968 static void __devexit tg3_remove_one(struct pci_dev *pdev)
10969 {
10970         struct net_device *dev = pci_get_drvdata(pdev);
10971
10972         if (dev) {
10973                 struct tg3 *tp = netdev_priv(dev);
10974
10975                 flush_scheduled_work();
10976                 unregister_netdev(dev);
10977                 if (tp->regs) {
10978                         iounmap(tp->regs);
10979                         tp->regs = NULL;
10980                 }
10981                 free_netdev(dev);
10982                 pci_release_regions(pdev);
10983                 pci_disable_device(pdev);
10984                 pci_set_drvdata(pdev, NULL);
10985         }
10986 }
10987
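      /* Suspend: quiesce the interface, halt the chip and enter the requested
       * PCI power state.  If the power-state change fails, the hardware is
       * reinitialized so the interface keeps running.
       */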
10988 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
10989 {
10990         struct net_device *dev = pci_get_drvdata(pdev);
10991         struct tg3 *tp = netdev_priv(dev);
10992         int err;
10993
10994         if (!netif_running(dev))
10995                 return 0;
10996
10997         flush_scheduled_work();
10998         tg3_netif_stop(tp);
10999
11000         del_timer_sync(&tp->timer);
11001
11002         tg3_full_lock(tp, 1);
11003         tg3_disable_ints(tp);
11004         tg3_full_unlock(tp);
11005
11006         netif_device_detach(dev);
11007
11008         tg3_full_lock(tp, 0);
11009         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11010         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
11011         tg3_full_unlock(tp);
11012
11013         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
11014         if (err) {
11015                 tg3_full_lock(tp, 0);
11016
11017                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11018                 tg3_init_hw(tp);
11019
11020                 tp->timer.expires = jiffies + tp->timer_offset;
11021                 add_timer(&tp->timer);
11022
11023                 netif_device_attach(dev);
11024                 tg3_netif_start(tp);
11025
11026                 tg3_full_unlock(tp);
11027         }
11028
11029         return err;
11030 }
11031
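      /* Resume: restore PCI config space, power the chip back up and
       * reinitialize it from scratch before restarting the timer and the
       * transmit queue.
       */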
11032 static int tg3_resume(struct pci_dev *pdev)
11033 {
11034         struct net_device *dev = pci_get_drvdata(pdev);
11035         struct tg3 *tp = netdev_priv(dev);
11036         int err;
11037
11038         if (!netif_running(dev))
11039                 return 0;
11040
11041         pci_restore_state(tp->pdev);
11042
11043         err = tg3_set_power_state(tp, 0);
11044         if (err)
11045                 return err;
11046
11047         netif_device_attach(dev);
11048
11049         tg3_full_lock(tp, 0);
11050
11051         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11052         tg3_init_hw(tp);
11053
11054         tp->timer.expires = jiffies + tp->timer_offset;
11055         add_timer(&tp->timer);
11056
11057         tg3_netif_start(tp);
11058
11059         tg3_full_unlock(tp);
11060
11061         return 0;
11062 }
11063
11064 static struct pci_driver tg3_driver = {
11065         .name           = DRV_MODULE_NAME,
11066         .id_table       = tg3_pci_tbl,
11067         .probe          = tg3_init_one,
11068         .remove         = __devexit_p(tg3_remove_one),
11069         .suspend        = tg3_suspend,
11070         .resume         = tg3_resume,
11071 };
11072
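      /* Module registration: pci_module_init() is the legacy wrapper around
       * pci_register_driver() and behaves identically here.
       */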
11073 static int __init tg3_init(void)
11074 {
11075         return pci_module_init(&tg3_driver);
11076 }
11077
11078 static void __exit tg3_cleanup(void)
11079 {
11080         pci_unregister_driver(&tg3_driver);
11081 }
11082
11083 module_init(tg3_init);
11084 module_exit(tg3_cleanup);