Merge with http://kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
[pandora-kernel.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43
44 #include <asm/system.h>
45 #include <asm/io.h>
46 #include <asm/byteorder.h>
47 #include <asm/uaccess.h>
48
49 #ifdef CONFIG_SPARC64
50 #include <asm/idprom.h>
51 #include <asm/oplib.h>
52 #include <asm/pbm.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #ifdef NETIF_F_TSO
62 #define TG3_TSO_SUPPORT 1
63 #else
64 #define TG3_TSO_SUPPORT 0
65 #endif
66
67 #include "tg3.h"
68
69 #define DRV_MODULE_NAME         "tg3"
70 #define PFX DRV_MODULE_NAME     ": "
71 #define DRV_MODULE_VERSION      "3.43"
72 #define DRV_MODULE_RELDATE      "Oct 24, 2005"
73
74 #define TG3_DEF_MAC_MODE        0
75 #define TG3_DEF_RX_MODE         0
76 #define TG3_DEF_TX_MODE         0
77 #define TG3_DEF_MSG_ENABLE        \
78         (NETIF_MSG_DRV          | \
79          NETIF_MSG_PROBE        | \
80          NETIF_MSG_LINK         | \
81          NETIF_MSG_TIMER        | \
82          NETIF_MSG_IFDOWN       | \
83          NETIF_MSG_IFUP         | \
84          NETIF_MSG_RX_ERR       | \
85          NETIF_MSG_TX_ERR)
86
87 /* length of time before we decide the hardware is borked,
88  * and dev->tx_timeout() should be called to fix the problem
89  */
90 #define TG3_TX_TIMEOUT                  (5 * HZ)
91
92 /* hardware minimum and maximum for a single frame's data payload */
93 #define TG3_MIN_MTU                     60
94 #define TG3_MAX_MTU(tp) \
95         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
96
97 /* These numbers seem to be hard coded in the NIC firmware somehow.
98  * You can't change the ring sizes, but you can change where you place
99  * them in the NIC onboard memory.
100  */
101 #define TG3_RX_RING_SIZE                512
102 #define TG3_DEF_RX_RING_PENDING         200
103 #define TG3_RX_JUMBO_RING_SIZE          256
104 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
105
106 /* Do not place this n-ring entries value into the tp struct itself,
107  * we really want to expose these constants to GCC so that modulo et
108  * al.  operations are done with shifts and masks instead of with
109  * hw multiply/modulo instructions.  Another solution would be to
110  * replace things like '% foo' with '& (foo - 1)'.
111  */
112 #define TG3_RX_RCB_RING_SIZE(tp)        \
113         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
114
115 #define TG3_TX_RING_SIZE                512
116 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
117
118 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_RING_SIZE)
120 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
121                                  TG3_RX_JUMBO_RING_SIZE)
122 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
123                                    TG3_RX_RCB_RING_SIZE(tp))
124 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
125                                  TG3_TX_RING_SIZE)
126 #define TX_BUFFS_AVAIL(TP)                                              \
127         ((TP)->tx_pending -                                             \
128          (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
129 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
130
131 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
132 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
133
134 /* minimum number of free TX descriptors required to wake up TX process */
135 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
136
137 /* number of ETHTOOL_GSTATS u64's */
138 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
139
140 #define TG3_NUM_TEST            6
141
142 static char version[] __devinitdata =
143         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
144
145 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
146 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
147 MODULE_LICENSE("GPL");
148 MODULE_VERSION(DRV_MODULE_VERSION);
149
150 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
151 module_param(tg3_debug, int, 0);
152 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
153
154 static struct pci_device_id tg3_pci_tbl[] = {
155         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
156           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
158           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
160           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
162           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
164           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
166           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
168           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
170           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
172           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
174           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
176           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
178           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
180           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
182           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
184           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
186           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
188           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
190           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
192           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
194           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
196           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
198           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
200           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
202           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
204           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
206           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
208           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
210           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
212           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
214           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
216           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
218           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
220           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
222           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
224           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
226           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
228           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
230           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
231         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
232           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
233         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
234           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
235         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
236           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
237         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
238           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
239         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
240           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
241         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
242           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
243         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
244           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
245         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
246           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
247         { 0, }
248 };
249
250 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
251
252 static struct {
253         const char string[ETH_GSTRING_LEN];
254 } ethtool_stats_keys[TG3_NUM_STATS] = {
255         { "rx_octets" },
256         { "rx_fragments" },
257         { "rx_ucast_packets" },
258         { "rx_mcast_packets" },
259         { "rx_bcast_packets" },
260         { "rx_fcs_errors" },
261         { "rx_align_errors" },
262         { "rx_xon_pause_rcvd" },
263         { "rx_xoff_pause_rcvd" },
264         { "rx_mac_ctrl_rcvd" },
265         { "rx_xoff_entered" },
266         { "rx_frame_too_long_errors" },
267         { "rx_jabbers" },
268         { "rx_undersize_packets" },
269         { "rx_in_length_errors" },
270         { "rx_out_length_errors" },
271         { "rx_64_or_less_octet_packets" },
272         { "rx_65_to_127_octet_packets" },
273         { "rx_128_to_255_octet_packets" },
274         { "rx_256_to_511_octet_packets" },
275         { "rx_512_to_1023_octet_packets" },
276         { "rx_1024_to_1522_octet_packets" },
277         { "rx_1523_to_2047_octet_packets" },
278         { "rx_2048_to_4095_octet_packets" },
279         { "rx_4096_to_8191_octet_packets" },
280         { "rx_8192_to_9022_octet_packets" },
281
282         { "tx_octets" },
283         { "tx_collisions" },
284
285         { "tx_xon_sent" },
286         { "tx_xoff_sent" },
287         { "tx_flow_control" },
288         { "tx_mac_errors" },
289         { "tx_single_collisions" },
290         { "tx_mult_collisions" },
291         { "tx_deferred" },
292         { "tx_excessive_collisions" },
293         { "tx_late_collisions" },
294         { "tx_collide_2times" },
295         { "tx_collide_3times" },
296         { "tx_collide_4times" },
297         { "tx_collide_5times" },
298         { "tx_collide_6times" },
299         { "tx_collide_7times" },
300         { "tx_collide_8times" },
301         { "tx_collide_9times" },
302         { "tx_collide_10times" },
303         { "tx_collide_11times" },
304         { "tx_collide_12times" },
305         { "tx_collide_13times" },
306         { "tx_collide_14times" },
307         { "tx_collide_15times" },
308         { "tx_ucast_packets" },
309         { "tx_mcast_packets" },
310         { "tx_bcast_packets" },
311         { "tx_carrier_sense_errors" },
312         { "tx_discards" },
313         { "tx_errors" },
314
315         { "dma_writeq_full" },
316         { "dma_write_prioq_full" },
317         { "rxbds_empty" },
318         { "rx_discards" },
319         { "rx_errors" },
320         { "rx_threshold_hit" },
321
322         { "dma_readq_full" },
323         { "dma_read_prioq_full" },
324         { "tx_comp_queue_full" },
325
326         { "ring_set_send_prod_index" },
327         { "ring_status_update" },
328         { "nic_irqs" },
329         { "nic_avoided_irqs" },
330         { "nic_tx_threshold_hit" }
331 };
332
333 static struct {
334         const char string[ETH_GSTRING_LEN];
335 } ethtool_test_keys[TG3_NUM_TEST] = {
336         { "nvram test     (online) " },
337         { "link test      (online) " },
338         { "register test  (offline)" },
339         { "memory test    (offline)" },
340         { "loopback test  (offline)" },
341         { "interrupt test (offline)" },
342 };
343
344 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
345 {
346         unsigned long flags;
347
348         spin_lock_irqsave(&tp->indirect_lock, flags);
349         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
350         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
351         spin_unlock_irqrestore(&tp->indirect_lock, flags);
352 }
353
354 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
355 {
356         writel(val, tp->regs + off);
357         readl(tp->regs + off);
358 }
359
360 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
361 {
362         unsigned long flags;
363         u32 val;
364
365         spin_lock_irqsave(&tp->indirect_lock, flags);
366         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
367         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
368         spin_unlock_irqrestore(&tp->indirect_lock, flags);
369         return val;
370 }
371
372 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
373 {
374         unsigned long flags;
375
376         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
377                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
378                                        TG3_64BIT_REG_LOW, val);
379                 return;
380         }
381         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
382                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
383                                        TG3_64BIT_REG_LOW, val);
384                 return;
385         }
386
387         spin_lock_irqsave(&tp->indirect_lock, flags);
388         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
389         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
390         spin_unlock_irqrestore(&tp->indirect_lock, flags);
391
392         /* In indirect mode when disabling interrupts, we also need
393          * to clear the interrupt bit in the GRC local ctrl register.
394          */
395         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
396             (val == 0x1)) {
397                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
398                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
399         }
400 }
401
402 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
403 {
404         unsigned long flags;
405         u32 val;
406
407         spin_lock_irqsave(&tp->indirect_lock, flags);
408         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
409         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
410         spin_unlock_irqrestore(&tp->indirect_lock, flags);
411         return val;
412 }
413
414 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
415 {
416         tp->write32(tp, off, val);
417         if (!(tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) &&
418             !(tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) &&
419             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
420                 tp->read32(tp, off);    /* flush */
421 }
422
423 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
424 {
425         tp->write32_mbox(tp, off, val);
426         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
427             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
428                 tp->read32_mbox(tp, off);
429 }
430
431 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
432 {
433         void __iomem *mbox = tp->regs + off;
434         writel(val, mbox);
435         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
436                 writel(val, mbox);
437         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
438                 readl(mbox);
439 }
440
441 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
442 {
443         writel(val, tp->regs + off);
444 }
445
446 static u32 tg3_read32(struct tg3 *tp, u32 off)
447 {
448         return (readl(tp->regs + off)); 
449 }
450
451 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
452 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
453 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
454 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
455 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
456
457 #define tw32(reg,val)           tp->write32(tp, reg, val)
458 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
459 #define tr32(reg)               tp->read32(tp, reg)
460
461 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
462 {
463         unsigned long flags;
464
465         spin_lock_irqsave(&tp->indirect_lock, flags);
466         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
467         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
468
469         /* Always leave this as zero. */
470         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
471         spin_unlock_irqrestore(&tp->indirect_lock, flags);
472 }
473
474 static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
475 {
476         /* If no workaround is needed, write to mem space directly */
477         if (tp->write32 != tg3_write_indirect_reg32)
478                 tw32(NIC_SRAM_WIN_BASE + off, val);
479         else
480                 tg3_write_mem(tp, off, val);
481 }
482
483 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
484 {
485         unsigned long flags;
486
487         spin_lock_irqsave(&tp->indirect_lock, flags);
488         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
489         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
490
491         /* Always leave this as zero. */
492         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
493         spin_unlock_irqrestore(&tp->indirect_lock, flags);
494 }
495
496 static void tg3_disable_ints(struct tg3 *tp)
497 {
498         tw32(TG3PCI_MISC_HOST_CTRL,
499              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
500         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
501 }
502
503 static inline void tg3_cond_int(struct tg3 *tp)
504 {
505         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
506             (tp->hw_status->status & SD_STATUS_UPDATED))
507                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
508 }
509
510 static void tg3_enable_ints(struct tg3 *tp)
511 {
512         tp->irq_sync = 0;
513         wmb();
514
515         tw32(TG3PCI_MISC_HOST_CTRL,
516              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
517         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
518                        (tp->last_tag << 24));
519         tg3_cond_int(tp);
520 }
521
522 static inline unsigned int tg3_has_work(struct tg3 *tp)
523 {
524         struct tg3_hw_status *sblk = tp->hw_status;
525         unsigned int work_exists = 0;
526
527         /* check for phy events */
528         if (!(tp->tg3_flags &
529               (TG3_FLAG_USE_LINKCHG_REG |
530                TG3_FLAG_POLL_SERDES))) {
531                 if (sblk->status & SD_STATUS_LINK_CHG)
532                         work_exists = 1;
533         }
534         /* check for RX/TX work to do */
535         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
536             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
537                 work_exists = 1;
538
539         return work_exists;
540 }
541
542 /* tg3_restart_ints
543  *  similar to tg3_enable_ints, but it accurately determines whether there
544  *  is new work pending and can return without flushing the PIO write
545  *  which reenables interrupts 
546  */
547 static void tg3_restart_ints(struct tg3 *tp)
548 {
549         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
550                      tp->last_tag << 24);
551         mmiowb();
552
553         /* When doing tagged status, this work check is unnecessary.
554          * The last_tag we write above tells the chip which piece of
555          * work we've completed.
556          */
557         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
558             tg3_has_work(tp))
559                 tw32(HOSTCC_MODE, tp->coalesce_mode |
560                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
561 }
562
563 static inline void tg3_netif_stop(struct tg3 *tp)
564 {
565         tp->dev->trans_start = jiffies; /* prevent tx timeout */
566         netif_poll_disable(tp->dev);
567         netif_tx_disable(tp->dev);
568 }
569
570 static inline void tg3_netif_start(struct tg3 *tp)
571 {
572         netif_wake_queue(tp->dev);
573         /* NOTE: unconditional netif_wake_queue is only appropriate
574          * so long as all callers are assured to have free tx slots
575          * (such as after tg3_init_hw)
576          */
577         netif_poll_enable(tp->dev);
578         tp->hw_status->status |= SD_STATUS_UPDATED;
579         tg3_enable_ints(tp);
580 }
581
582 static void tg3_switch_clocks(struct tg3 *tp)
583 {
584         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
585         u32 orig_clock_ctrl;
586
587         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
588                 return;
589
590         orig_clock_ctrl = clock_ctrl;
591         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
592                        CLOCK_CTRL_CLKRUN_OENABLE |
593                        0x1f);
594         tp->pci_clock_ctrl = clock_ctrl;
595
596         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
597                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
598                         tw32_f(TG3PCI_CLOCK_CTRL,
599                                clock_ctrl | CLOCK_CTRL_625_CORE);
600                         udelay(40);
601                 }
602         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
603                 tw32_f(TG3PCI_CLOCK_CTRL,
604                      clock_ctrl |
605                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
606                 udelay(40);
607                 tw32_f(TG3PCI_CLOCK_CTRL,
608                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
609                 udelay(40);
610         }
611         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
612         udelay(40);
613 }
614
615 #define PHY_BUSY_LOOPS  5000
616
617 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
618 {
619         u32 frame_val;
620         unsigned int loops;
621         int ret;
622
623         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
624                 tw32_f(MAC_MI_MODE,
625                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
626                 udelay(80);
627         }
628
629         *val = 0x0;
630
631         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
632                       MI_COM_PHY_ADDR_MASK);
633         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
634                       MI_COM_REG_ADDR_MASK);
635         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
636         
637         tw32_f(MAC_MI_COM, frame_val);
638
639         loops = PHY_BUSY_LOOPS;
640         while (loops != 0) {
641                 udelay(10);
642                 frame_val = tr32(MAC_MI_COM);
643
644                 if ((frame_val & MI_COM_BUSY) == 0) {
645                         udelay(5);
646                         frame_val = tr32(MAC_MI_COM);
647                         break;
648                 }
649                 loops -= 1;
650         }
651
652         ret = -EBUSY;
653         if (loops != 0) {
654                 *val = frame_val & MI_COM_DATA_MASK;
655                 ret = 0;
656         }
657
658         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
659                 tw32_f(MAC_MI_MODE, tp->mi_mode);
660                 udelay(80);
661         }
662
663         return ret;
664 }
665
666 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
667 {
668         u32 frame_val;
669         unsigned int loops;
670         int ret;
671
672         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
673                 tw32_f(MAC_MI_MODE,
674                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
675                 udelay(80);
676         }
677
678         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
679                       MI_COM_PHY_ADDR_MASK);
680         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
681                       MI_COM_REG_ADDR_MASK);
682         frame_val |= (val & MI_COM_DATA_MASK);
683         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
684         
685         tw32_f(MAC_MI_COM, frame_val);
686
687         loops = PHY_BUSY_LOOPS;
688         while (loops != 0) {
689                 udelay(10);
690                 frame_val = tr32(MAC_MI_COM);
691                 if ((frame_val & MI_COM_BUSY) == 0) {
692                         udelay(5);
693                         frame_val = tr32(MAC_MI_COM);
694                         break;
695                 }
696                 loops -= 1;
697         }
698
699         ret = -EBUSY;
700         if (loops != 0)
701                 ret = 0;
702
703         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
704                 tw32_f(MAC_MI_MODE, tp->mi_mode);
705                 udelay(80);
706         }
707
708         return ret;
709 }
710
711 static void tg3_phy_set_wirespeed(struct tg3 *tp)
712 {
713         u32 val;
714
715         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
716                 return;
717
718         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
719             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
720                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
721                              (val | (1 << 15) | (1 << 4)));
722 }
723
724 static int tg3_bmcr_reset(struct tg3 *tp)
725 {
726         u32 phy_control;
727         int limit, err;
728
729         /* OK, reset it, and poll the BMCR_RESET bit until it
730          * clears or we time out.
731          */
732         phy_control = BMCR_RESET;
733         err = tg3_writephy(tp, MII_BMCR, phy_control);
734         if (err != 0)
735                 return -EBUSY;
736
737         limit = 5000;
738         while (limit--) {
739                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
740                 if (err != 0)
741                         return -EBUSY;
742
743                 if ((phy_control & BMCR_RESET) == 0) {
744                         udelay(40);
745                         break;
746                 }
747                 udelay(10);
748         }
749         if (limit <= 0)
750                 return -EBUSY;
751
752         return 0;
753 }
754
755 static int tg3_wait_macro_done(struct tg3 *tp)
756 {
757         int limit = 100;
758
759         while (limit--) {
760                 u32 tmp32;
761
762                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
763                         if ((tmp32 & 0x1000) == 0)
764                                 break;
765                 }
766         }
767         if (limit <= 0)
768                 return -EBUSY;
769
770         return 0;
771 }
772
773 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
774 {
775         static const u32 test_pat[4][6] = {
776         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
777         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
778         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
779         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
780         };
781         int chan;
782
783         for (chan = 0; chan < 4; chan++) {
784                 int i;
785
786                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
787                              (chan * 0x2000) | 0x0200);
788                 tg3_writephy(tp, 0x16, 0x0002);
789
790                 for (i = 0; i < 6; i++)
791                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
792                                      test_pat[chan][i]);
793
794                 tg3_writephy(tp, 0x16, 0x0202);
795                 if (tg3_wait_macro_done(tp)) {
796                         *resetp = 1;
797                         return -EBUSY;
798                 }
799
800                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
801                              (chan * 0x2000) | 0x0200);
802                 tg3_writephy(tp, 0x16, 0x0082);
803                 if (tg3_wait_macro_done(tp)) {
804                         *resetp = 1;
805                         return -EBUSY;
806                 }
807
808                 tg3_writephy(tp, 0x16, 0x0802);
809                 if (tg3_wait_macro_done(tp)) {
810                         *resetp = 1;
811                         return -EBUSY;
812                 }
813
814                 for (i = 0; i < 6; i += 2) {
815                         u32 low, high;
816
817                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
818                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
819                             tg3_wait_macro_done(tp)) {
820                                 *resetp = 1;
821                                 return -EBUSY;
822                         }
823                         low &= 0x7fff;
824                         high &= 0x000f;
825                         if (low != test_pat[chan][i] ||
826                             high != test_pat[chan][i+1]) {
827                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
828                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
829                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
830
831                                 return -EBUSY;
832                         }
833                 }
834         }
835
836         return 0;
837 }
838
839 static int tg3_phy_reset_chanpat(struct tg3 *tp)
840 {
841         int chan;
842
843         for (chan = 0; chan < 4; chan++) {
844                 int i;
845
846                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
847                              (chan * 0x2000) | 0x0200);
848                 tg3_writephy(tp, 0x16, 0x0002);
849                 for (i = 0; i < 6; i++)
850                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
851                 tg3_writephy(tp, 0x16, 0x0202);
852                 if (tg3_wait_macro_done(tp))
853                         return -EBUSY;
854         }
855
856         return 0;
857 }
858
859 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
860 {
861         u32 reg32, phy9_orig;
862         int retries, do_phy_reset, err;
863
864         retries = 10;
865         do_phy_reset = 1;
866         do {
867                 if (do_phy_reset) {
868                         err = tg3_bmcr_reset(tp);
869                         if (err)
870                                 return err;
871                         do_phy_reset = 0;
872                 }
873
874                 /* Disable transmitter and interrupt.  */
875                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
876                         continue;
877
878                 reg32 |= 0x3000;
879                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
880
881                 /* Set full-duplex, 1000 mbps.  */
882                 tg3_writephy(tp, MII_BMCR,
883                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
884
885                 /* Set to master mode.  */
886                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
887                         continue;
888
889                 tg3_writephy(tp, MII_TG3_CTRL,
890                              (MII_TG3_CTRL_AS_MASTER |
891                               MII_TG3_CTRL_ENABLE_AS_MASTER));
892
893                 /* Enable SM_DSP_CLOCK and 6dB.  */
894                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
895
896                 /* Block the PHY control access.  */
897                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
898                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
899
900                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
901                 if (!err)
902                         break;
903         } while (--retries);
904
905         err = tg3_phy_reset_chanpat(tp);
906         if (err)
907                 return err;
908
909         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
910         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
911
912         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
913         tg3_writephy(tp, 0x16, 0x0000);
914
915         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
916             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
917                 /* Set Extended packet length bit for jumbo frames */
918                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
919         }
920         else {
921                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
922         }
923
924         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
925
926         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
927                 reg32 &= ~0x3000;
928                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
929         } else if (!err)
930                 err = -EBUSY;
931
932         return err;
933 }
934
935 /* This will reset the tigon3 PHY if there is no valid
936  * link unless the FORCE argument is non-zero.
937  */
938 static int tg3_phy_reset(struct tg3 *tp)
939 {
940         u32 phy_status;
941         int err;
942
943         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
944         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
945         if (err != 0)
946                 return -EBUSY;
947
948         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
949             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
950             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
951                 err = tg3_phy_reset_5703_4_5(tp);
952                 if (err)
953                         return err;
954                 goto out;
955         }
956
957         err = tg3_bmcr_reset(tp);
958         if (err)
959                 return err;
960
961 out:
962         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
963                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
964                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
965                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
966                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
967                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
968                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
969         }
970         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
971                 tg3_writephy(tp, 0x1c, 0x8d68);
972                 tg3_writephy(tp, 0x1c, 0x8d68);
973         }
974         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
975                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
976                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
977                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
978                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
979                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
980                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
981                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
982                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
983         }
984         /* Set Extended packet length bit (bit 14) on all chips that */
985         /* support jumbo frames */
986         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
987                 /* Cannot do read-modify-write on 5401 */
988                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
989         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
990                 u32 phy_reg;
991
992                 /* Set bit 14 with read-modify-write to preserve other bits */
993                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
994                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
995                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
996         }
997
998         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
999          * jumbo frames transmission.
1000          */
1001         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1002                 u32 phy_reg;
1003
1004                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1005                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1006                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1007         }
1008
1009         tg3_phy_set_wirespeed(tp);
1010         return 0;
1011 }
1012
1013 static void tg3_frob_aux_power(struct tg3 *tp)
1014 {
1015         struct tg3 *tp_peer = tp;
1016
1017         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1018                 return;
1019
1020         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1021                 tp_peer = pci_get_drvdata(tp->pdev_peer);
1022                 if (!tp_peer)
1023                         BUG();
1024         }
1025
1026
1027         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1028             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
1029                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1030                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1031                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1032                              (GRC_LCLCTRL_GPIO_OE0 |
1033                               GRC_LCLCTRL_GPIO_OE1 |
1034                               GRC_LCLCTRL_GPIO_OE2 |
1035                               GRC_LCLCTRL_GPIO_OUTPUT0 |
1036                               GRC_LCLCTRL_GPIO_OUTPUT1));
1037                         udelay(100);
1038                 } else {
1039                         u32 no_gpio2;
1040                         u32 grc_local_ctrl;
1041
1042                         if (tp_peer != tp &&
1043                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1044                                 return;
1045
1046                         /* On 5753 and variants, GPIO2 cannot be used. */
1047                         no_gpio2 = tp->nic_sram_data_cfg &
1048                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1049
1050                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1051                                          GRC_LCLCTRL_GPIO_OE1 |
1052                                          GRC_LCLCTRL_GPIO_OE2 |
1053                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1054                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1055                         if (no_gpio2) {
1056                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1057                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1058                         }
1059                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1060                                                 grc_local_ctrl);
1061                         udelay(100);
1062
1063                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1064
1065                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1066                                                 grc_local_ctrl);
1067                         udelay(100);
1068
1069                         if (!no_gpio2) {
1070                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1071                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1072                                        grc_local_ctrl);
1073                                 udelay(100);
1074                         }
1075                 }
1076         } else {
1077                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1078                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1079                         if (tp_peer != tp &&
1080                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1081                                 return;
1082
1083                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1084                              (GRC_LCLCTRL_GPIO_OE1 |
1085                               GRC_LCLCTRL_GPIO_OUTPUT1));
1086                         udelay(100);
1087
1088                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1089                              (GRC_LCLCTRL_GPIO_OE1));
1090                         udelay(100);
1091
1092                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1093                              (GRC_LCLCTRL_GPIO_OE1 |
1094                               GRC_LCLCTRL_GPIO_OUTPUT1));
1095                         udelay(100);
1096                 }
1097         }
1098 }
1099
1100 static int tg3_setup_phy(struct tg3 *, int);
1101
1102 #define RESET_KIND_SHUTDOWN     0
1103 #define RESET_KIND_INIT         1
1104 #define RESET_KIND_SUSPEND      2
1105
1106 static void tg3_write_sig_post_reset(struct tg3 *, int);
1107 static int tg3_halt_cpu(struct tg3 *, u32);
1108
1109 static int tg3_set_power_state(struct tg3 *tp, int state)
1110 {
1111         u32 misc_host_ctrl;
1112         u16 power_control, power_caps;
1113         int pm = tp->pm_cap;
1114
1115         /* Make sure register accesses (indirect or otherwise)
1116          * will function correctly.
1117          */
1118         pci_write_config_dword(tp->pdev,
1119                                TG3PCI_MISC_HOST_CTRL,
1120                                tp->misc_host_ctrl);
1121
1122         pci_read_config_word(tp->pdev,
1123                              pm + PCI_PM_CTRL,
1124                              &power_control);
1125         power_control |= PCI_PM_CTRL_PME_STATUS;
1126         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1127         switch (state) {
1128         case 0:
1129                 power_control |= 0;
1130                 pci_write_config_word(tp->pdev,
1131                                       pm + PCI_PM_CTRL,
1132                                       power_control);
1133                 udelay(100);    /* Delay after power state change */
1134
1135                 /* Switch out of Vaux if it is not a LOM */
1136                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
1137                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1138                         udelay(100);
1139                 }
1140
1141                 return 0;
1142
1143         case 1:
1144                 power_control |= 1;
1145                 break;
1146
1147         case 2:
1148                 power_control |= 2;
1149                 break;
1150
1151         case 3:
1152                 power_control |= 3;
1153                 break;
1154
1155         default:
1156                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1157                        "requested.\n",
1158                        tp->dev->name, state);
1159                 return -EINVAL;
1160         };
1161
1162         power_control |= PCI_PM_CTRL_PME_ENABLE;
1163
1164         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1165         tw32(TG3PCI_MISC_HOST_CTRL,
1166              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1167
1168         if (tp->link_config.phy_is_low_power == 0) {
1169                 tp->link_config.phy_is_low_power = 1;
1170                 tp->link_config.orig_speed = tp->link_config.speed;
1171                 tp->link_config.orig_duplex = tp->link_config.duplex;
1172                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1173         }
1174
1175         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1176                 tp->link_config.speed = SPEED_10;
1177                 tp->link_config.duplex = DUPLEX_HALF;
1178                 tp->link_config.autoneg = AUTONEG_ENABLE;
1179                 tg3_setup_phy(tp, 0);
1180         }
1181
1182         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1183
1184         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1185                 u32 mac_mode;
1186
1187                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1188                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1189                         udelay(40);
1190
1191                         mac_mode = MAC_MODE_PORT_MODE_MII;
1192
1193                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1194                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1195                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1196                 } else {
1197                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1198                 }
1199
1200                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1201                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1202
1203                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1204                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1205                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1206
1207                 tw32_f(MAC_MODE, mac_mode);
1208                 udelay(100);
1209
1210                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1211                 udelay(10);
1212         }
1213
1214         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1215             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1216              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1217                 u32 base_val;
1218
1219                 base_val = tp->pci_clock_ctrl;
1220                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1221                              CLOCK_CTRL_TXCLK_DISABLE);
1222
1223                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1224                      CLOCK_CTRL_ALTCLK |
1225                      CLOCK_CTRL_PWRDOWN_PLL133);
1226                 udelay(40);
1227         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1228                 /* do nothing */
1229         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1230                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1231                 u32 newbits1, newbits2;
1232
1233                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1234                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1235                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1236                                     CLOCK_CTRL_TXCLK_DISABLE |
1237                                     CLOCK_CTRL_ALTCLK);
1238                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1239                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1240                         newbits1 = CLOCK_CTRL_625_CORE;
1241                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1242                 } else {
1243                         newbits1 = CLOCK_CTRL_ALTCLK;
1244                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1245                 }
1246
1247                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1248                 udelay(40);
1249
1250                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1251                 udelay(40);
1252
1253                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1254                         u32 newbits3;
1255
1256                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1257                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1258                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1259                                             CLOCK_CTRL_TXCLK_DISABLE |
1260                                             CLOCK_CTRL_44MHZ_CORE);
1261                         } else {
1262                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1263                         }
1264
1265                         tw32_f(TG3PCI_CLOCK_CTRL,
1266                                          tp->pci_clock_ctrl | newbits3);
1267                         udelay(40);
1268                 }
1269         }
1270
1271         tg3_frob_aux_power(tp);
1272
1273         /* Workaround for unstable PLL clock */
1274         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1275             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1276                 u32 val = tr32(0x7d00);
1277
1278                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1279                 tw32(0x7d00, val);
1280                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1281                         tg3_halt_cpu(tp, RX_CPU_BASE);
1282         }
1283
1284         /* Finally, set the new power state. */
1285         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1286         udelay(100);    /* Delay after power state change */
1287
1288         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1289
1290         return 0;
1291 }
1292
1293 static void tg3_link_report(struct tg3 *tp)
1294 {
1295         if (!netif_carrier_ok(tp->dev)) {
1296                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1297         } else {
1298                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1299                        tp->dev->name,
1300                        (tp->link_config.active_speed == SPEED_1000 ?
1301                         1000 :
1302                         (tp->link_config.active_speed == SPEED_100 ?
1303                          100 : 10)),
1304                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1305                         "full" : "half"));
1306
1307                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1308                        "%s for RX.\n",
1309                        tp->dev->name,
1310                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1311                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1312         }
1313 }
1314
1315 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1316 {
1317         u32 new_tg3_flags = 0;
1318         u32 old_rx_mode = tp->rx_mode;
1319         u32 old_tx_mode = tp->tx_mode;
1320
1321         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1322
1323                 /* Convert 1000BaseX flow control bits to 1000BaseT
1324                  * bits before resolving flow control.
1325                  */
1326                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1327                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1328                                        ADVERTISE_PAUSE_ASYM);
1329                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1330
1331                         if (local_adv & ADVERTISE_1000XPAUSE)
1332                                 local_adv |= ADVERTISE_PAUSE_CAP;
1333                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1334                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1335                         if (remote_adv & LPA_1000XPAUSE)
1336                                 remote_adv |= LPA_PAUSE_CAP;
1337                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1338                                 remote_adv |= LPA_PAUSE_ASYM;
1339                 }
1340
1341                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1342                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1343                                 if (remote_adv & LPA_PAUSE_CAP)
1344                                         new_tg3_flags |=
1345                                                 (TG3_FLAG_RX_PAUSE |
1346                                                 TG3_FLAG_TX_PAUSE);
1347                                 else if (remote_adv & LPA_PAUSE_ASYM)
1348                                         new_tg3_flags |=
1349                                                 (TG3_FLAG_RX_PAUSE);
1350                         } else {
1351                                 if (remote_adv & LPA_PAUSE_CAP)
1352                                         new_tg3_flags |=
1353                                                 (TG3_FLAG_RX_PAUSE |
1354                                                 TG3_FLAG_TX_PAUSE);
1355                         }
1356                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1357                         if ((remote_adv & LPA_PAUSE_CAP) &&
1358                         (remote_adv & LPA_PAUSE_ASYM))
1359                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1360                 }
1361
1362                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1363                 tp->tg3_flags |= new_tg3_flags;
1364         } else {
1365                 new_tg3_flags = tp->tg3_flags;
1366         }
1367
1368         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1369                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1370         else
1371                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1372
1373         if (old_rx_mode != tp->rx_mode) {
1374                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1375         }
1376         
1377         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1378                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1379         else
1380                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1381
1382         if (old_tx_mode != tp->tx_mode) {
1383                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1384         }
1385 }
1386
1387 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1388 {
1389         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1390         case MII_TG3_AUX_STAT_10HALF:
1391                 *speed = SPEED_10;
1392                 *duplex = DUPLEX_HALF;
1393                 break;
1394
1395         case MII_TG3_AUX_STAT_10FULL:
1396                 *speed = SPEED_10;
1397                 *duplex = DUPLEX_FULL;
1398                 break;
1399
1400         case MII_TG3_AUX_STAT_100HALF:
1401                 *speed = SPEED_100;
1402                 *duplex = DUPLEX_HALF;
1403                 break;
1404
1405         case MII_TG3_AUX_STAT_100FULL:
1406                 *speed = SPEED_100;
1407                 *duplex = DUPLEX_FULL;
1408                 break;
1409
1410         case MII_TG3_AUX_STAT_1000HALF:
1411                 *speed = SPEED_1000;
1412                 *duplex = DUPLEX_HALF;
1413                 break;
1414
1415         case MII_TG3_AUX_STAT_1000FULL:
1416                 *speed = SPEED_1000;
1417                 *duplex = DUPLEX_FULL;
1418                 break;
1419
1420         default:
1421                 *speed = SPEED_INVALID;
1422                 *duplex = DUPLEX_INVALID;
1423                 break;
1424                 }
1425 }
1426
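/* Program the copper PHY advertisement registers.  Three cases are
 * handled below: low-power mode (advertise 10Mb only, plus 100Mb when
 * wake-on-LAN at 100Mb is needed), autonegotiation with no speed forced
 * (advertise everything the configuration allows), and a forced
 * speed/duplex.  In the forced case BMCR is programmed directly, after
 * briefly dropping the link via loopback so the new setting takes hold;
 * otherwise autonegotiation is (re)started.
 */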
1427 static void tg3_phy_copper_begin(struct tg3 *tp)
1428 {
1429         u32 new_adv;
1430         int i;
1431
1432         if (tp->link_config.phy_is_low_power) {
1433                 /* Entering low power mode.  Disable gigabit and
1434                  * 100baseT advertisements.
1435                  */
1436                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1437
1438                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1439                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1440                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1441                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1442
1443                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1444         } else if (tp->link_config.speed == SPEED_INVALID) {
1445                 tp->link_config.advertising =
1446                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1447                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1448                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1449                          ADVERTISED_Autoneg | ADVERTISED_MII);
1450
1451                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1452                         tp->link_config.advertising &=
1453                                 ~(ADVERTISED_1000baseT_Half |
1454                                   ADVERTISED_1000baseT_Full);
1455
1456                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1457                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1458                         new_adv |= ADVERTISE_10HALF;
1459                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1460                         new_adv |= ADVERTISE_10FULL;
1461                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1462                         new_adv |= ADVERTISE_100HALF;
1463                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1464                         new_adv |= ADVERTISE_100FULL;
1465                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1466
1467                 if (tp->link_config.advertising &
1468                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1469                         new_adv = 0;
1470                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1471                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1472                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1473                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1474                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1475                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1476                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1477                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1478                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1479                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1480                 } else {
1481                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1482                 }
1483         } else {
1484                 /* Asking for a specific link mode. */
1485                 if (tp->link_config.speed == SPEED_1000) {
1486                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1487                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1488
1489                         if (tp->link_config.duplex == DUPLEX_FULL)
1490                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1491                         else
1492                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1493                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1494                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1495                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1496                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1497                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1498                 } else {
1499                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1500
1501                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1502                         if (tp->link_config.speed == SPEED_100) {
1503                                 if (tp->link_config.duplex == DUPLEX_FULL)
1504                                         new_adv |= ADVERTISE_100FULL;
1505                                 else
1506                                         new_adv |= ADVERTISE_100HALF;
1507                         } else {
1508                                 if (tp->link_config.duplex == DUPLEX_FULL)
1509                                         new_adv |= ADVERTISE_10FULL;
1510                                 else
1511                                         new_adv |= ADVERTISE_10HALF;
1512                         }
1513                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1514                 }
1515         }
1516
1517         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1518             tp->link_config.speed != SPEED_INVALID) {
1519                 u32 bmcr, orig_bmcr;
1520
1521                 tp->link_config.active_speed = tp->link_config.speed;
1522                 tp->link_config.active_duplex = tp->link_config.duplex;
1523
1524                 bmcr = 0;
1525                 switch (tp->link_config.speed) {
1526                 default:
1527                 case SPEED_10:
1528                         break;
1529
1530                 case SPEED_100:
1531                         bmcr |= BMCR_SPEED100;
1532                         break;
1533
1534                 case SPEED_1000:
1535                         bmcr |= TG3_BMCR_SPEED1000;
1536                         break;
1537                 }
1538
1539                 if (tp->link_config.duplex == DUPLEX_FULL)
1540                         bmcr |= BMCR_FULLDPLX;
1541
1542                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1543                     (bmcr != orig_bmcr)) {
1544                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1545                         for (i = 0; i < 1500; i++) {
1546                                 u32 tmp;
1547
1548                                 udelay(10);
1549                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1550                                     tg3_readphy(tp, MII_BMSR, &tmp))
1551                                         continue;
1552                                 if (!(tmp & BMSR_LSTATUS)) {
1553                                         udelay(40);
1554                                         break;
1555                                 }
1556                         }
1557                         tg3_writephy(tp, MII_BMCR, bmcr);
1558                         udelay(40);
1559                 }
1560         } else {
1561                 tg3_writephy(tp, MII_BMCR,
1562                              BMCR_ANENABLE | BMCR_ANRESTART);
1563         }
1564 }
1565
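/* One-time DSP setup for the BCM5401 PHY.  Each DSP location is
 * reached by writing its address to MII_TG3_DSP_ADDRESS and then the
 * data to MII_TG3_DSP_RW_PORT; the magic values come from Broadcom and
 * are not otherwise documented here.
 */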
1566 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1567 {
1568         int err;
1569
1570         /* Turn off tap power management. */
1571         /* Set Extended packet length bit */
1572         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1573
1574         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1575         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1576
1577         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1578         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1579
1580         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1581         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1582
1583         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1584         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1585
1586         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1587         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1588
1589         udelay(40);
1590
1591         return err;
1592 }
1593
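/* Return 1 only if the PHY is currently advertising every 10/100 mode
 * (and every 1000 mode unless the device is 10/100-only).  Used below
 * to decide whether autonegotiation has to be restarted, e.g. after
 * returning from low-power mode with a reduced advertisement.
 */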
1594 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1595 {
1596         u32 adv_reg, all_mask;
1597
1598         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1599                 return 0;
1600
1601         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1602                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1603         if ((adv_reg & all_mask) != all_mask)
1604                 return 0;
1605         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1606                 u32 tg3_ctrl;
1607
1608                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1609                         return 0;
1610
1611                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1612                             MII_TG3_CTRL_ADV_1000_FULL);
1613                 if ((tg3_ctrl & all_mask) != all_mask)
1614                         return 0;
1615         }
1616         return 1;
1617 }
1618
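/* Bring up (or re-validate) the link on a copper PHY: clear stale MAC
 * and PHY status, apply chip-specific PHY workarounds, poll BMSR for
 * link, derive speed/duplex from the aux status register, resolve flow
 * control when autonegotiating at full duplex, then program MAC_MODE
 * and report any carrier change.
 */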
1619 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1620 {
1621         int current_link_up;
1622         u32 bmsr, dummy;
1623         u16 current_speed;
1624         u8 current_duplex;
1625         int i, err;
1626
1627         tw32(MAC_EVENT, 0);
1628
1629         tw32_f(MAC_STATUS,
1630              (MAC_STATUS_SYNC_CHANGED |
1631               MAC_STATUS_CFG_CHANGED |
1632               MAC_STATUS_MI_COMPLETION |
1633               MAC_STATUS_LNKSTATE_CHANGED));
1634         udelay(40);
1635
1636         tp->mi_mode = MAC_MI_MODE_BASE;
1637         tw32_f(MAC_MI_MODE, tp->mi_mode);
1638         udelay(80);
1639
1640         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1641
1642         /* Some third-party PHYs need to be reset on link going
1643          * down.
1644          */
1645         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1646              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1647              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1648             netif_carrier_ok(tp->dev)) {
1649                 tg3_readphy(tp, MII_BMSR, &bmsr);
1650                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1651                     !(bmsr & BMSR_LSTATUS))
1652                         force_reset = 1;
1653         }
1654         if (force_reset)
1655                 tg3_phy_reset(tp);
1656
1657         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1658                 tg3_readphy(tp, MII_BMSR, &bmsr);
1659                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1660                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1661                         bmsr = 0;
1662
1663                 if (!(bmsr & BMSR_LSTATUS)) {
1664                         err = tg3_init_5401phy_dsp(tp);
1665                         if (err)
1666                                 return err;
1667
1668                         tg3_readphy(tp, MII_BMSR, &bmsr);
1669                         for (i = 0; i < 1000; i++) {
1670                                 udelay(10);
1671                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1672                                     (bmsr & BMSR_LSTATUS)) {
1673                                         udelay(40);
1674                                         break;
1675                                 }
1676                         }
1677
1678                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1679                             !(bmsr & BMSR_LSTATUS) &&
1680                             tp->link_config.active_speed == SPEED_1000) {
1681                                 err = tg3_phy_reset(tp);
1682                                 if (!err)
1683                                         err = tg3_init_5401phy_dsp(tp);
1684                                 if (err)
1685                                         return err;
1686                         }
1687                 }
1688         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1689                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1690                 /* 5701 {A0,B0} CRC bug workaround */
1691                 tg3_writephy(tp, 0x15, 0x0a75);
1692                 tg3_writephy(tp, 0x1c, 0x8c68);
1693                 tg3_writephy(tp, 0x1c, 0x8d68);
1694                 tg3_writephy(tp, 0x1c, 0x8c68);
1695         }
1696
1697         /* Clear pending interrupts... */
1698         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1699         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1700
1701         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1702                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1703         else
1704                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1705
1706         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1707             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1708                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1709                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1710                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1711                 else
1712                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1713         }
1714
1715         current_link_up = 0;
1716         current_speed = SPEED_INVALID;
1717         current_duplex = DUPLEX_INVALID;
1718
1719         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1720                 u32 val;
1721
1722                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1723                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1724                 if (!(val & (1 << 10))) {
1725                         val |= (1 << 10);
1726                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1727                         goto relink;
1728                 }
1729         }
1730
1731         bmsr = 0;
1732         for (i = 0; i < 100; i++) {
1733                 tg3_readphy(tp, MII_BMSR, &bmsr);
1734                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1735                     (bmsr & BMSR_LSTATUS))
1736                         break;
1737                 udelay(40);
1738         }
1739
1740         if (bmsr & BMSR_LSTATUS) {
1741                 u32 aux_stat, bmcr;
1742
1743                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1744                 for (i = 0; i < 2000; i++) {
1745                         udelay(10);
1746                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1747                             aux_stat)
1748                                 break;
1749                 }
1750
1751                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1752                                              &current_speed,
1753                                              &current_duplex);
1754
1755                 bmcr = 0;
1756                 for (i = 0; i < 200; i++) {
1757                         tg3_readphy(tp, MII_BMCR, &bmcr);
1758                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1759                                 continue;
1760                         if (bmcr && bmcr != 0x7fff)
1761                                 break;
1762                         udelay(10);
1763                 }
1764
1765                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1766                         if (bmcr & BMCR_ANENABLE) {
1767                                 current_link_up = 1;
1768
1769                                 /* Force autoneg restart if we are exiting
1770                                  * low power mode.
1771                                  */
1772                                 if (!tg3_copper_is_advertising_all(tp))
1773                                         current_link_up = 0;
1774                         } else {
1775                                 current_link_up = 0;
1776                         }
1777                 } else {
1778                         if (!(bmcr & BMCR_ANENABLE) &&
1779                             tp->link_config.speed == current_speed &&
1780                             tp->link_config.duplex == current_duplex) {
1781                                 current_link_up = 1;
1782                         } else {
1783                                 current_link_up = 0;
1784                         }
1785                 }
1786
1787                 tp->link_config.active_speed = current_speed;
1788                 tp->link_config.active_duplex = current_duplex;
1789         }
1790
1791         if (current_link_up == 1 &&
1792             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1793             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1794                 u32 local_adv, remote_adv;
1795
1796                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1797                         local_adv = 0;
1798                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1799
1800                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1801                         remote_adv = 0;
1802
1803                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1804
1805                 /* If we are not advertising full pause capability,
1806                  * something is wrong.  Bring the link down and reconfigure.
1807                  */
1808                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1809                         current_link_up = 0;
1810                 } else {
1811                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1812                 }
1813         }
1814 relink:
1815         if (current_link_up == 0) {
1816                 u32 tmp;
1817
1818                 tg3_phy_copper_begin(tp);
1819
1820                 tg3_readphy(tp, MII_BMSR, &tmp);
1821                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1822                     (tmp & BMSR_LSTATUS))
1823                         current_link_up = 1;
1824         }
1825
1826         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1827         if (current_link_up == 1) {
1828                 if (tp->link_config.active_speed == SPEED_100 ||
1829                     tp->link_config.active_speed == SPEED_10)
1830                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1831                 else
1832                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1833         } else
1834                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1835
1836         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1837         if (tp->link_config.active_duplex == DUPLEX_HALF)
1838                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1839
1840         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1841         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1842                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1843                     (current_link_up == 1 &&
1844                      tp->link_config.active_speed == SPEED_10))
1845                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1846         } else {
1847                 if (current_link_up == 1)
1848                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1849         }
1850
1851         /* ??? Without this setting Netgear GA302T PHY does not
1852          * ??? send/receive packets...
1853          */
1854         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1855             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1856                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1857                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1858                 udelay(80);
1859         }
1860
1861         tw32_f(MAC_MODE, tp->mac_mode);
1862         udelay(40);
1863
1864         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1865                 /* Polled via timer. */
1866                 tw32_f(MAC_EVENT, 0);
1867         } else {
1868                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1869         }
1870         udelay(40);
1871
1872         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1873             current_link_up == 1 &&
1874             tp->link_config.active_speed == SPEED_1000 &&
1875             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1876              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1877                 udelay(120);
1878                 tw32_f(MAC_STATUS,
1879                      (MAC_STATUS_SYNC_CHANGED |
1880                       MAC_STATUS_CFG_CHANGED));
1881                 udelay(40);
1882                 tg3_write_mem(tp,
1883                               NIC_SRAM_FIRMWARE_MBOX,
1884                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1885         }
1886
1887         if (current_link_up != netif_carrier_ok(tp->dev)) {
1888                 if (current_link_up)
1889                         netif_carrier_on(tp->dev);
1890                 else
1891                         netif_carrier_off(tp->dev);
1892                 tg3_link_report(tp);
1893         }
1894
1895         return 0;
1896 }
1897
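/* Software 1000BASE-X autonegotiation, used when the hardware SG_DIG
 * block is not handling it.  The state machine below is modeled on the
 * IEEE 802.3 Clause 37 arbitration state diagram, and the MR_* flags
 * mirror the management variables that diagram is written in terms of.
 */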
1898 struct tg3_fiber_aneginfo {
1899         int state;
1900 #define ANEG_STATE_UNKNOWN              0
1901 #define ANEG_STATE_AN_ENABLE            1
1902 #define ANEG_STATE_RESTART_INIT         2
1903 #define ANEG_STATE_RESTART              3
1904 #define ANEG_STATE_DISABLE_LINK_OK      4
1905 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1906 #define ANEG_STATE_ABILITY_DETECT       6
1907 #define ANEG_STATE_ACK_DETECT_INIT      7
1908 #define ANEG_STATE_ACK_DETECT           8
1909 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1910 #define ANEG_STATE_COMPLETE_ACK         10
1911 #define ANEG_STATE_IDLE_DETECT_INIT     11
1912 #define ANEG_STATE_IDLE_DETECT          12
1913 #define ANEG_STATE_LINK_OK              13
1914 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1915 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1916
1917         u32 flags;
1918 #define MR_AN_ENABLE            0x00000001
1919 #define MR_RESTART_AN           0x00000002
1920 #define MR_AN_COMPLETE          0x00000004
1921 #define MR_PAGE_RX              0x00000008
1922 #define MR_NP_LOADED            0x00000010
1923 #define MR_TOGGLE_TX            0x00000020
1924 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1925 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1926 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1927 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1928 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1929 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1930 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1931 #define MR_TOGGLE_RX            0x00002000
1932 #define MR_NP_RX                0x00004000
1933
1934 #define MR_LINK_OK              0x80000000
1935
1936         unsigned long link_time, cur_time;
1937
1938         u32 ability_match_cfg;
1939         int ability_match_count;
1940
1941         char ability_match, idle_match, ack_match;
1942
1943         u32 txconfig, rxconfig;
1944 #define ANEG_CFG_NP             0x00000080
1945 #define ANEG_CFG_ACK            0x00000040
1946 #define ANEG_CFG_RF2            0x00000020
1947 #define ANEG_CFG_RF1            0x00000010
1948 #define ANEG_CFG_PS2            0x00000001
1949 #define ANEG_CFG_PS1            0x00008000
1950 #define ANEG_CFG_HD             0x00004000
1951 #define ANEG_CFG_FD             0x00002000
1952 #define ANEG_CFG_INVAL          0x00001f06
1953
1954 };
1955 #define ANEG_OK         0
1956 #define ANEG_DONE       1
1957 #define ANEG_TIMER_ENAB 2
1958 #define ANEG_FAILED     -1
1959
1960 #define ANEG_STATE_SETTLE_TIME  10000
1961
1962 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1963                                    struct tg3_fiber_aneginfo *ap)
1964 {
1965         unsigned long delta;
1966         u32 rx_cfg_reg;
1967         int ret;
1968
1969         if (ap->state == ANEG_STATE_UNKNOWN) {
1970                 ap->rxconfig = 0;
1971                 ap->link_time = 0;
1972                 ap->cur_time = 0;
1973                 ap->ability_match_cfg = 0;
1974                 ap->ability_match_count = 0;
1975                 ap->ability_match = 0;
1976                 ap->idle_match = 0;
1977                 ap->ack_match = 0;
1978         }
1979         ap->cur_time++;
1980
1981         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1982                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1983
1984                 if (rx_cfg_reg != ap->ability_match_cfg) {
1985                         ap->ability_match_cfg = rx_cfg_reg;
1986                         ap->ability_match = 0;
1987                         ap->ability_match_count = 0;
1988                 } else {
1989                         if (++ap->ability_match_count > 1) {
1990                                 ap->ability_match = 1;
1991                                 ap->ability_match_cfg = rx_cfg_reg;
1992                         }
1993                 }
1994                 if (rx_cfg_reg & ANEG_CFG_ACK)
1995                         ap->ack_match = 1;
1996                 else
1997                         ap->ack_match = 0;
1998
1999                 ap->idle_match = 0;
2000         } else {
2001                 ap->idle_match = 1;
2002                 ap->ability_match_cfg = 0;
2003                 ap->ability_match_count = 0;
2004                 ap->ability_match = 0;
2005                 ap->ack_match = 0;
2006
2007                 rx_cfg_reg = 0;
2008         }
2009
2010         ap->rxconfig = rx_cfg_reg;
2011         ret = ANEG_OK;
2012
2013         switch (ap->state) {
2014         case ANEG_STATE_UNKNOWN:
2015                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2016                         ap->state = ANEG_STATE_AN_ENABLE;
2017
2018                 /* fallthru */
2019         case ANEG_STATE_AN_ENABLE:
2020                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2021                 if (ap->flags & MR_AN_ENABLE) {
2022                         ap->link_time = 0;
2023                         ap->cur_time = 0;
2024                         ap->ability_match_cfg = 0;
2025                         ap->ability_match_count = 0;
2026                         ap->ability_match = 0;
2027                         ap->idle_match = 0;
2028                         ap->ack_match = 0;
2029
2030                         ap->state = ANEG_STATE_RESTART_INIT;
2031                 } else {
2032                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2033                 }
2034                 break;
2035
2036         case ANEG_STATE_RESTART_INIT:
2037                 ap->link_time = ap->cur_time;
2038                 ap->flags &= ~(MR_NP_LOADED);
2039                 ap->txconfig = 0;
2040                 tw32(MAC_TX_AUTO_NEG, 0);
2041                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2042                 tw32_f(MAC_MODE, tp->mac_mode);
2043                 udelay(40);
2044
2045                 ret = ANEG_TIMER_ENAB;
2046                 ap->state = ANEG_STATE_RESTART;
2047
2048                 /* fallthru */
2049         case ANEG_STATE_RESTART:
2050                 delta = ap->cur_time - ap->link_time;
2051                 if (delta > ANEG_STATE_SETTLE_TIME) {
2052                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2053                 } else {
2054                         ret = ANEG_TIMER_ENAB;
2055                 }
2056                 break;
2057
2058         case ANEG_STATE_DISABLE_LINK_OK:
2059                 ret = ANEG_DONE;
2060                 break;
2061
2062         case ANEG_STATE_ABILITY_DETECT_INIT:
2063                 ap->flags &= ~(MR_TOGGLE_TX);
2064                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2065                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2066                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2067                 tw32_f(MAC_MODE, tp->mac_mode);
2068                 udelay(40);
2069
2070                 ap->state = ANEG_STATE_ABILITY_DETECT;
2071                 break;
2072
2073         case ANEG_STATE_ABILITY_DETECT:
2074                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2075                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2076                 }
2077                 break;
2078
2079         case ANEG_STATE_ACK_DETECT_INIT:
2080                 ap->txconfig |= ANEG_CFG_ACK;
2081                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2082                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2083                 tw32_f(MAC_MODE, tp->mac_mode);
2084                 udelay(40);
2085
2086                 ap->state = ANEG_STATE_ACK_DETECT;
2087
2088                 /* fallthru */
2089         case ANEG_STATE_ACK_DETECT:
2090                 if (ap->ack_match != 0) {
2091                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2092                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2093                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2094                         } else {
2095                                 ap->state = ANEG_STATE_AN_ENABLE;
2096                         }
2097                 } else if (ap->ability_match != 0 &&
2098                            ap->rxconfig == 0) {
2099                         ap->state = ANEG_STATE_AN_ENABLE;
2100                 }
2101                 break;
2102
2103         case ANEG_STATE_COMPLETE_ACK_INIT:
2104                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2105                         ret = ANEG_FAILED;
2106                         break;
2107                 }
2108                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2109                                MR_LP_ADV_HALF_DUPLEX |
2110                                MR_LP_ADV_SYM_PAUSE |
2111                                MR_LP_ADV_ASYM_PAUSE |
2112                                MR_LP_ADV_REMOTE_FAULT1 |
2113                                MR_LP_ADV_REMOTE_FAULT2 |
2114                                MR_LP_ADV_NEXT_PAGE |
2115                                MR_TOGGLE_RX |
2116                                MR_NP_RX);
2117                 if (ap->rxconfig & ANEG_CFG_FD)
2118                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2119                 if (ap->rxconfig & ANEG_CFG_HD)
2120                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2121                 if (ap->rxconfig & ANEG_CFG_PS1)
2122                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2123                 if (ap->rxconfig & ANEG_CFG_PS2)
2124                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2125                 if (ap->rxconfig & ANEG_CFG_RF1)
2126                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2127                 if (ap->rxconfig & ANEG_CFG_RF2)
2128                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2129                 if (ap->rxconfig & ANEG_CFG_NP)
2130                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2131
2132                 ap->link_time = ap->cur_time;
2133
2134                 ap->flags ^= (MR_TOGGLE_TX);
2135                 if (ap->rxconfig & 0x0008)
2136                         ap->flags |= MR_TOGGLE_RX;
2137                 if (ap->rxconfig & ANEG_CFG_NP)
2138                         ap->flags |= MR_NP_RX;
2139                 ap->flags |= MR_PAGE_RX;
2140
2141                 ap->state = ANEG_STATE_COMPLETE_ACK;
2142                 ret = ANEG_TIMER_ENAB;
2143                 break;
2144
2145         case ANEG_STATE_COMPLETE_ACK:
2146                 if (ap->ability_match != 0 &&
2147                     ap->rxconfig == 0) {
2148                         ap->state = ANEG_STATE_AN_ENABLE;
2149                         break;
2150                 }
2151                 delta = ap->cur_time - ap->link_time;
2152                 if (delta > ANEG_STATE_SETTLE_TIME) {
2153                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2154                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2155                         } else {
2156                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2157                                     !(ap->flags & MR_NP_RX)) {
2158                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2159                                 } else {
2160                                         ret = ANEG_FAILED;
2161                                 }
2162                         }
2163                 }
2164                 break;
2165
2166         case ANEG_STATE_IDLE_DETECT_INIT:
2167                 ap->link_time = ap->cur_time;
2168                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2169                 tw32_f(MAC_MODE, tp->mac_mode);
2170                 udelay(40);
2171
2172                 ap->state = ANEG_STATE_IDLE_DETECT;
2173                 ret = ANEG_TIMER_ENAB;
2174                 break;
2175
2176         case ANEG_STATE_IDLE_DETECT:
2177                 if (ap->ability_match != 0 &&
2178                     ap->rxconfig == 0) {
2179                         ap->state = ANEG_STATE_AN_ENABLE;
2180                         break;
2181                 }
2182                 delta = ap->cur_time - ap->link_time;
2183                 if (delta > ANEG_STATE_SETTLE_TIME) {
2184                         /* XXX another gem from the Broadcom driver :( */
2185                         ap->state = ANEG_STATE_LINK_OK;
2186                 }
2187                 break;
2188
2189         case ANEG_STATE_LINK_OK:
2190                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2191                 ret = ANEG_DONE;
2192                 break;
2193
2194         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2195                 /* ??? unimplemented */
2196                 break;
2197
2198         case ANEG_STATE_NEXT_PAGE_WAIT:
2199                 /* ??? unimplemented */
2200                 break;
2201
2202         default:
2203                 ret = ANEG_FAILED;
2204                 break;
2205         }
2206
2207         return ret;
2208 }
2209
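/* Run the software autoneg state machine for up to roughly 195 ms
 * (195000 passes with a 1 usec delay each) while the MAC is forced to
 * send config code words.  Returns 1 when the machine finishes with a
 * usable result; the raw flags are handed back to the caller for pause
 * resolution.
 */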
2210 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2211 {
2212         int res = 0;
2213         struct tg3_fiber_aneginfo aninfo;
2214         int status = ANEG_FAILED;
2215         unsigned int tick;
2216         u32 tmp;
2217
2218         tw32_f(MAC_TX_AUTO_NEG, 0);
2219
2220         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2221         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2222         udelay(40);
2223
2224         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2225         udelay(40);
2226
2227         memset(&aninfo, 0, sizeof(aninfo));
2228         aninfo.flags |= MR_AN_ENABLE;
2229         aninfo.state = ANEG_STATE_UNKNOWN;
2230         aninfo.cur_time = 0;
2231         tick = 0;
2232         while (++tick < 195000) {
2233                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2234                 if (status == ANEG_DONE || status == ANEG_FAILED)
2235                         break;
2236
2237                 udelay(1);
2238         }
2239
2240         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2241         tw32_f(MAC_MODE, tp->mac_mode);
2242         udelay(40);
2243
2244         *flags = aninfo.flags;
2245
2246         if (status == ANEG_DONE &&
2247             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2248                              MR_LP_ADV_FULL_DUPLEX)))
2249                 res = 1;
2250
2251         return res;
2252 }
2253
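/* Bring-up sequence for the external BCM8002 SerDes PHY, done with
 * vendor-specific register writes: PLL lock range, soft reset, clock
 * and comdet selection, a POR pulse, and finally deselecting the
 * channel registers so the PHY ID can be read back later.
 */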
2254 static void tg3_init_bcm8002(struct tg3 *tp)
2255 {
2256         u32 mac_status = tr32(MAC_STATUS);
2257         int i;
2258
2259         /* Reset when initializing for the first time, or when we already have a link. */
2260         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2261             !(mac_status & MAC_STATUS_PCS_SYNCED))
2262                 return;
2263
2264         /* Set PLL lock range. */
2265         tg3_writephy(tp, 0x16, 0x8007);
2266
2267         /* SW reset */
2268         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2269
2270         /* Wait for reset to complete. */
2271         /* XXX schedule_timeout() ... */
2272         for (i = 0; i < 500; i++)
2273                 udelay(10);
2274
2275         /* Config mode; select PMA/Ch 1 regs. */
2276         tg3_writephy(tp, 0x10, 0x8411);
2277
2278         /* Enable auto-lock and comdet, select txclk for tx. */
2279         tg3_writephy(tp, 0x11, 0x0a10);
2280
2281         tg3_writephy(tp, 0x18, 0x00a0);
2282         tg3_writephy(tp, 0x16, 0x41ff);
2283
2284         /* Assert and deassert POR. */
2285         tg3_writephy(tp, 0x13, 0x0400);
2286         udelay(40);
2287         tg3_writephy(tp, 0x13, 0x0000);
2288
2289         tg3_writephy(tp, 0x11, 0x0a50);
2290         udelay(40);
2291         tg3_writephy(tp, 0x11, 0x0a10);
2292
2293         /* Wait for signal to stabilize */
2294         /* XXX schedule_timeout() ... */
2295         for (i = 0; i < 15000; i++)
2296                 udelay(10);
2297
2298         /* Deselect the channel register so we can read the PHYID
2299          * later.
2300          */
2301         tg3_writephy(tp, 0x10, 0x8011);
2302 }
2303
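/* Fiber link setup using the on-chip SG_DIG (SerDes digital) autoneg
 * block.  When autonegotiation is not requested the block is returned
 * to its non-autoneg value; otherwise the expected control word (with
 * the symmetric and asymmetric pause bits) is programmed, we wait up to
 * ~200 ms for a result, derive the pause configuration from
 * SG_DIG_STATUS, and fall back to parallel detection (PCS sync with no
 * config code words) when autoneg does not complete.  On affected chips
 * MAC_SERDES_CFG is rewritten around these transitions, preserving the
 * pre-emphasis and voltage regulator bits noted below.
 */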
2304 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2305 {
2306         u32 sg_dig_ctrl, sg_dig_status;
2307         u32 serdes_cfg, expected_sg_dig_ctrl;
2308         int workaround, port_a;
2309         int current_link_up;
2310
2311         serdes_cfg = 0;
2312         expected_sg_dig_ctrl = 0;
2313         workaround = 0;
2314         port_a = 1;
2315         current_link_up = 0;
2316
2317         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2318             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2319                 workaround = 1;
2320                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2321                         port_a = 0;
2322
2323                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2324                 /* preserve bits 20-23 for voltage regulator */
2325                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2326         }
2327
2328         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2329
2330         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2331                 if (sg_dig_ctrl & (1 << 31)) {
2332                         if (workaround) {
2333                                 u32 val = serdes_cfg;
2334
2335                                 if (port_a)
2336                                         val |= 0xc010000;
2337                                 else
2338                                         val |= 0x4010000;
2339                                 tw32_f(MAC_SERDES_CFG, val);
2340                         }
2341                         tw32_f(SG_DIG_CTRL, 0x01388400);
2342                 }
2343                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2344                         tg3_setup_flow_control(tp, 0, 0);
2345                         current_link_up = 1;
2346                 }
2347                 goto out;
2348         }
2349
2350         /* Want auto-negotiation.  */
2351         expected_sg_dig_ctrl = 0x81388400;
2352
2353         /* Pause capability */
2354         expected_sg_dig_ctrl |= (1 << 11);
2355
2356         /* Asymmetric pause */
2357         expected_sg_dig_ctrl |= (1 << 12);
2358
2359         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2360                 if (workaround)
2361                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2362                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2363                 udelay(5);
2364                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2365
2366                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2367         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2368                                  MAC_STATUS_SIGNAL_DET)) {
2369                 int i;
2370
2371                 /* Give it time to negotiate (~200ms) */
2372                 for (i = 0; i < 40000; i++) {
2373                         sg_dig_status = tr32(SG_DIG_STATUS);
2374                         if (sg_dig_status & (0x3))
2375                                 break;
2376                         udelay(5);
2377                 }
2378                 mac_status = tr32(MAC_STATUS);
2379
2380                 if ((sg_dig_status & (1 << 1)) &&
2381                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2382                         u32 local_adv, remote_adv;
2383
2384                         local_adv = ADVERTISE_PAUSE_CAP;
2385                         remote_adv = 0;
2386                         if (sg_dig_status & (1 << 19))
2387                                 remote_adv |= LPA_PAUSE_CAP;
2388                         if (sg_dig_status & (1 << 20))
2389                                 remote_adv |= LPA_PAUSE_ASYM;
2390
2391                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2392                         current_link_up = 1;
2393                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2394                 } else if (!(sg_dig_status & (1 << 1))) {
2395                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2396                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2397                         else {
2398                                 if (workaround) {
2399                                         u32 val = serdes_cfg;
2400
2401                                         if (port_a)
2402                                                 val |= 0xc010000;
2403                                         else
2404                                                 val |= 0x4010000;
2405
2406                                         tw32_f(MAC_SERDES_CFG, val);
2407                                 }
2408
2409                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2410                                 udelay(40);
2411
2412                                 /* Link parallel detection - link is up
2413                                  * only if we have PCS_SYNC and are not
2414                                  * receiving config code words. */
2415                                 mac_status = tr32(MAC_STATUS);
2416                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2417                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2418                                         tg3_setup_flow_control(tp, 0, 0);
2419                                         current_link_up = 1;
2420                                 }
2421                         }
2422                 }
2423         }
2424
2425 out:
2426         return current_link_up;
2427 }
2428
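/* Software fiber autoneg path for devices not using the hardware
 * SG_DIG block.  Runs fiber_autoneg() and translates the MR_LP_ADV_*
 * pause flags into advertisement words for tg3_setup_flow_control();
 * with autonegotiation disabled it simply forces a 1000 Mb/s
 * full-duplex link.
 */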
2429 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2430 {
2431         int current_link_up = 0;
2432
2433         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2434                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2435                 goto out;
2436         }
2437
2438         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2439                 u32 flags;
2440                 int i;
2441
2442                 if (fiber_autoneg(tp, &flags)) {
2443                         u32 local_adv, remote_adv;
2444
2445                         local_adv = ADVERTISE_PAUSE_CAP;
2446                         remote_adv = 0;
2447                         if (flags & MR_LP_ADV_SYM_PAUSE)
2448                                 remote_adv |= LPA_PAUSE_CAP;
2449                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2450                                 remote_adv |= LPA_PAUSE_ASYM;
2451
2452                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2453
2454                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2455                         current_link_up = 1;
2456                 }
2457                 for (i = 0; i < 30; i++) {
2458                         udelay(20);
2459                         tw32_f(MAC_STATUS,
2460                                (MAC_STATUS_SYNC_CHANGED |
2461                                 MAC_STATUS_CFG_CHANGED));
2462                         udelay(40);
2463                         if ((tr32(MAC_STATUS) &
2464                              (MAC_STATUS_SYNC_CHANGED |
2465                               MAC_STATUS_CFG_CHANGED)) == 0)
2466                                 break;
2467                 }
2468
2469                 mac_status = tr32(MAC_STATUS);
2470                 if (current_link_up == 0 &&
2471                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2472                     !(mac_status & MAC_STATUS_RCVD_CFG))
2473                         current_link_up = 1;
2474         } else {
2475                 /* Forcing 1000FD link up. */
2476                 current_link_up = 1;
2477                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2478
2479                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2480                 udelay(40);
2481         }
2482
2483 out:
2484         return current_link_up;
2485 }
2486
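/* Top-level link setup for SerDes (TBI) devices: skip the work when the
 * MAC status shows nothing has changed, switch the MAC port mode to
 * TBI, initialize a BCM8002 PHY if present, run hardware or software
 * autoneg, then update the LED controls, carrier state and link report.
 */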
2487 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2488 {
2489         u32 orig_pause_cfg;
2490         u16 orig_active_speed;
2491         u8 orig_active_duplex;
2492         u32 mac_status;
2493         int current_link_up;
2494         int i;
2495
2496         orig_pause_cfg =
2497                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2498                                   TG3_FLAG_TX_PAUSE));
2499         orig_active_speed = tp->link_config.active_speed;
2500         orig_active_duplex = tp->link_config.active_duplex;
2501
2502         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2503             netif_carrier_ok(tp->dev) &&
2504             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2505                 mac_status = tr32(MAC_STATUS);
2506                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2507                                MAC_STATUS_SIGNAL_DET |
2508                                MAC_STATUS_CFG_CHANGED |
2509                                MAC_STATUS_RCVD_CFG);
2510                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2511                                    MAC_STATUS_SIGNAL_DET)) {
2512                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2513                                             MAC_STATUS_CFG_CHANGED));
2514                         return 0;
2515                 }
2516         }
2517
2518         tw32_f(MAC_TX_AUTO_NEG, 0);
2519
2520         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2521         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2522         tw32_f(MAC_MODE, tp->mac_mode);
2523         udelay(40);
2524
2525         if (tp->phy_id == PHY_ID_BCM8002)
2526                 tg3_init_bcm8002(tp);
2527
2528         /* Enable link change event even when serdes polling.  */
2529         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2530         udelay(40);
2531
2532         current_link_up = 0;
2533         mac_status = tr32(MAC_STATUS);
2534
2535         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2536                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2537         else
2538                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2539
2540         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2541         tw32_f(MAC_MODE, tp->mac_mode);
2542         udelay(40);
2543
2544         tp->hw_status->status =
2545                 (SD_STATUS_UPDATED |
2546                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2547
2548         for (i = 0; i < 100; i++) {
2549                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2550                                     MAC_STATUS_CFG_CHANGED));
2551                 udelay(5);
2552                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2553                                          MAC_STATUS_CFG_CHANGED)) == 0)
2554                         break;
2555         }
2556
2557         mac_status = tr32(MAC_STATUS);
2558         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2559                 current_link_up = 0;
2560                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2561                         tw32_f(MAC_MODE, (tp->mac_mode |
2562                                           MAC_MODE_SEND_CONFIGS));
2563                         udelay(1);
2564                         tw32_f(MAC_MODE, tp->mac_mode);
2565                 }
2566         }
2567
2568         if (current_link_up == 1) {
2569                 tp->link_config.active_speed = SPEED_1000;
2570                 tp->link_config.active_duplex = DUPLEX_FULL;
2571                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2572                                     LED_CTRL_LNKLED_OVERRIDE |
2573                                     LED_CTRL_1000MBPS_ON));
2574         } else {
2575                 tp->link_config.active_speed = SPEED_INVALID;
2576                 tp->link_config.active_duplex = DUPLEX_INVALID;
2577                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2578                                     LED_CTRL_LNKLED_OVERRIDE |
2579                                     LED_CTRL_TRAFFIC_OVERRIDE));
2580         }
2581
2582         if (current_link_up != netif_carrier_ok(tp->dev)) {
2583                 if (current_link_up)
2584                         netif_carrier_on(tp->dev);
2585                 else
2586                         netif_carrier_off(tp->dev);
2587                 tg3_link_report(tp);
2588         } else {
2589                 u32 now_pause_cfg =
2590                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2591                                          TG3_FLAG_TX_PAUSE);
2592                 if (orig_pause_cfg != now_pause_cfg ||
2593                     orig_active_speed != tp->link_config.active_speed ||
2594                     orig_active_duplex != tp->link_config.active_duplex)
2595                         tg3_link_report(tp);
2596         }
2597
2598         return 0;
2599 }
2600
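/* Link setup for SerDes devices that are managed through an MII-style
 * register set: the 1000BASE-X advertisement and partner ability bits
 * are accessed via MII_ADVERTISE and MII_LPA.  Handles a pending
 * parallel-detect state, autonegotiation, and forced full/half duplex,
 * then resolves flow control and reports carrier changes.
 */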
2601 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2602 {
2603         int current_link_up, err = 0;
2604         u32 bmsr, bmcr;
2605         u16 current_speed;
2606         u8 current_duplex;
2607
2608         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2609         tw32_f(MAC_MODE, tp->mac_mode);
2610         udelay(40);
2611
2612         tw32(MAC_EVENT, 0);
2613
2614         tw32_f(MAC_STATUS,
2615              (MAC_STATUS_SYNC_CHANGED |
2616               MAC_STATUS_CFG_CHANGED |
2617               MAC_STATUS_MI_COMPLETION |
2618               MAC_STATUS_LNKSTATE_CHANGED));
2619         udelay(40);
2620
2621         if (force_reset)
2622                 tg3_phy_reset(tp);
2623
2624         current_link_up = 0;
2625         current_speed = SPEED_INVALID;
2626         current_duplex = DUPLEX_INVALID;
2627
2628         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2629         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2630
2631         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2632
2633         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2634             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2635                 /* do nothing, just check for link up at the end */
2636         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2637                 u32 adv, new_adv;
2638
2639                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2640                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2641                                   ADVERTISE_1000XPAUSE |
2642                                   ADVERTISE_1000XPSE_ASYM |
2643                                   ADVERTISE_SLCT);
2644
2645                 /* Always advertise symmetric PAUSE just like copper */
2646                 new_adv |= ADVERTISE_1000XPAUSE;
2647
2648                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2649                         new_adv |= ADVERTISE_1000XHALF;
2650                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2651                         new_adv |= ADVERTISE_1000XFULL;
2652
2653                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2654                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2655                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2656                         tg3_writephy(tp, MII_BMCR, bmcr);
2657
2658                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2659                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2660                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2661
2662                         return err;
2663                 }
2664         } else {
2665                 u32 new_bmcr;
2666
2667                 bmcr &= ~BMCR_SPEED1000;
2668                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2669
2670                 if (tp->link_config.duplex == DUPLEX_FULL)
2671                         new_bmcr |= BMCR_FULLDPLX;
2672
2673                 if (new_bmcr != bmcr) {
2674                         /* BMCR_SPEED1000 is a reserved bit that needs
2675                          * to be set on write.
2676                          */
2677                         new_bmcr |= BMCR_SPEED1000;
2678
2679                         /* Force a linkdown */
2680                         if (netif_carrier_ok(tp->dev)) {
2681                                 u32 adv;
2682
2683                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2684                                 adv &= ~(ADVERTISE_1000XFULL |
2685                                          ADVERTISE_1000XHALF |
2686                                          ADVERTISE_SLCT);
2687                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2688                                 tg3_writephy(tp, MII_BMCR, bmcr |
2689                                                            BMCR_ANRESTART |
2690                                                            BMCR_ANENABLE);
2691                                 udelay(10);
2692                                 netif_carrier_off(tp->dev);
2693                         }
2694                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2695                         bmcr = new_bmcr;
2696                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2697                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2698                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2699                 }
2700         }
2701
2702         if (bmsr & BMSR_LSTATUS) {
2703                 current_speed = SPEED_1000;
2704                 current_link_up = 1;
2705                 if (bmcr & BMCR_FULLDPLX)
2706                         current_duplex = DUPLEX_FULL;
2707                 else
2708                         current_duplex = DUPLEX_HALF;
2709
2710                 if (bmcr & BMCR_ANENABLE) {
2711                         u32 local_adv, remote_adv, common;
2712
2713                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2714                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2715                         common = local_adv & remote_adv;
2716                         if (common & (ADVERTISE_1000XHALF |
2717                                       ADVERTISE_1000XFULL)) {
2718                                 if (common & ADVERTISE_1000XFULL)
2719                                         current_duplex = DUPLEX_FULL;
2720                                 else
2721                                         current_duplex = DUPLEX_HALF;
2722
2723                                 tg3_setup_flow_control(tp, local_adv,
2724                                                        remote_adv);
2725                         }
2726                         else
2727                                 current_link_up = 0;
2728                 }
2729         }
2730
2731         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2732         if (tp->link_config.active_duplex == DUPLEX_HALF)
2733                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2734
2735         tw32_f(MAC_MODE, tp->mac_mode);
2736         udelay(40);
2737
2738         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2739
2740         tp->link_config.active_speed = current_speed;
2741         tp->link_config.active_duplex = current_duplex;
2742
2743         if (current_link_up != netif_carrier_ok(tp->dev)) {
2744                 if (current_link_up)
2745                         netif_carrier_on(tp->dev);
2746                 else {
2747                         netif_carrier_off(tp->dev);
2748                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2749                 }
2750                 tg3_link_report(tp);
2751         }
2752         return err;
2753 }
2754
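/* Intended to be called periodically while a SerDes link is coming up.
 * If autoneg has not produced a link, the PHY shadow and expansion
 * registers are checked: signal detect without incoming config code
 * words means the partner is in forced mode, so force 1000 Mb/s full
 * duplex here and mark it with TG3_FLG2_PARALLEL_DETECT.  Once config
 * code words are seen again, autoneg is re-enabled and the flag
 * cleared.
 */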
2755 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2756 {
2757         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2758                 /* Give autoneg time to complete. */
2759                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2760                 return;
2761         }
2762         if (!netif_carrier_ok(tp->dev) &&
2763             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2764                 u32 bmcr;
2765
2766                 tg3_readphy(tp, MII_BMCR, &bmcr);
2767                 if (bmcr & BMCR_ANENABLE) {
2768                         u32 phy1, phy2;
2769
2770                         /* Select shadow register 0x1f */
2771                         tg3_writephy(tp, 0x1c, 0x7c00);
2772                         tg3_readphy(tp, 0x1c, &phy1);
2773
2774                         /* Select expansion interrupt status register */
2775                         tg3_writephy(tp, 0x17, 0x0f01);
2776                         tg3_readphy(tp, 0x15, &phy2);
2777                         tg3_readphy(tp, 0x15, &phy2);
2778
2779                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2780                                 /* We have signal detect and are not receiving
2781                                  * config code words, so the link is up via
2782                                  * parallel detection.
2783                                  */
2784
2785                                 bmcr &= ~BMCR_ANENABLE;
2786                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2787                                 tg3_writephy(tp, MII_BMCR, bmcr);
2788                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2789                         }
2790                 }
2791         }
2792         else if (netif_carrier_ok(tp->dev) &&
2793                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2794                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2795                 u32 phy2;
2796
2797                 /* Select expansion interrupt status register */
2798                 tg3_writephy(tp, 0x17, 0x0f01);
2799                 tg3_readphy(tp, 0x15, &phy2);
2800                 if (phy2 & 0x20) {
2801                         u32 bmcr;
2802
2803                         /* Config code words received, turn on autoneg. */
2804                         tg3_readphy(tp, MII_BMCR, &bmcr);
2805                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2806
2807                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2808
2809                 }
2810         }
2811 }
2812
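     /* Top-level link setup: hand off to the fiber, fiber-MII or copper
      * PHY handler, then program MAC_TX_LENGTHS (1000/half gets a
      * different slot time) and, on chips without the 5705_PLUS flag,
      * enable or disable statistics coalescing based on carrier state.
      */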
2813 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2814 {
2815         int err;
2816
2817         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2818                 err = tg3_setup_fiber_phy(tp, force_reset);
2819         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2820                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2821         } else {
2822                 err = tg3_setup_copper_phy(tp, force_reset);
2823         }
2824
2825         if (tp->link_config.active_speed == SPEED_1000 &&
2826             tp->link_config.active_duplex == DUPLEX_HALF)
2827                 tw32(MAC_TX_LENGTHS,
2828                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2829                       (6 << TX_LENGTHS_IPG_SHIFT) |
2830                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2831         else
2832                 tw32(MAC_TX_LENGTHS,
2833                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2834                       (6 << TX_LENGTHS_IPG_SHIFT) |
2835                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2836
2837         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2838                 if (netif_carrier_ok(tp->dev)) {
2839                         tw32(HOSTCC_STAT_COAL_TICKS,
2840                              tp->coal.stats_block_coalesce_usecs);
2841                 } else {
2842                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2843                 }
2844         }
2845
2846         return err;
2847 }
2848
2849 /* Tigon3 never reports partial packet sends.  So we do not
2850  * need special logic to handle SKBs that have not had all
2851  * of their frags sent yet, like SunGEM does.
2852  */
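     /* Runs from the NAPI poll path (see tg3_poll() below).  tx_lock is
      * only taken on the slow path that re-wakes a stopped queue, so the
      * common completion path runs without contending with
      * tg3_start_xmit() for tx_lock.
      */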
2853 static void tg3_tx(struct tg3 *tp)
2854 {
2855         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2856         u32 sw_idx = tp->tx_cons;
2857
2858         while (sw_idx != hw_idx) {
2859                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2860                 struct sk_buff *skb = ri->skb;
2861                 int i;
2862
2863                 if (unlikely(skb == NULL))
2864                         BUG();
2865
2866                 pci_unmap_single(tp->pdev,
2867                                  pci_unmap_addr(ri, mapping),
2868                                  skb_headlen(skb),
2869                                  PCI_DMA_TODEVICE);
2870
2871                 ri->skb = NULL;
2872
2873                 sw_idx = NEXT_TX(sw_idx);
2874
2875                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2876                         if (unlikely(sw_idx == hw_idx))
2877                                 BUG();
2878
2879                         ri = &tp->tx_buffers[sw_idx];
2880                         if (unlikely(ri->skb != NULL))
2881                                 BUG();
2882
2883                         pci_unmap_page(tp->pdev,
2884                                        pci_unmap_addr(ri, mapping),
2885                                        skb_shinfo(skb)->frags[i].size,
2886                                        PCI_DMA_TODEVICE);
2887
2888                         sw_idx = NEXT_TX(sw_idx);
2889                 }
2890
2891                 dev_kfree_skb(skb);
2892         }
2893
2894         tp->tx_cons = sw_idx;
2895
2896         if (unlikely(netif_queue_stopped(tp->dev))) {
2897                 spin_lock(&tp->tx_lock);
2898                 if (netif_queue_stopped(tp->dev) &&
2899                     (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2900                         netif_wake_queue(tp->dev);
2901                 spin_unlock(&tp->tx_lock);
2902         }
2903 }
2904
2905 /* Returns size of skb allocated or < 0 on error.
2906  *
2907  * We only need to fill in the address because the other members
2908  * of the RX descriptor are invariant, see tg3_init_rings.
2909  *
2910  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2911  * posting buffers we only dirty the first cache line of the RX
2912  * descriptor (containing the address).  Whereas for the RX status
2913  * buffers the cpu only reads the last cacheline of the RX descriptor
2914  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2915  */
2916 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2917                             int src_idx, u32 dest_idx_unmasked)
2918 {
2919         struct tg3_rx_buffer_desc *desc;
2920         struct ring_info *map, *src_map;
2921         struct sk_buff *skb;
2922         dma_addr_t mapping;
2923         int skb_size, dest_idx;
2924
2925         src_map = NULL;
2926         switch (opaque_key) {
2927         case RXD_OPAQUE_RING_STD:
2928                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2929                 desc = &tp->rx_std[dest_idx];
2930                 map = &tp->rx_std_buffers[dest_idx];
2931                 if (src_idx >= 0)
2932                         src_map = &tp->rx_std_buffers[src_idx];
2933                 skb_size = tp->rx_pkt_buf_sz;
2934                 break;
2935
2936         case RXD_OPAQUE_RING_JUMBO:
2937                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2938                 desc = &tp->rx_jumbo[dest_idx];
2939                 map = &tp->rx_jumbo_buffers[dest_idx];
2940                 if (src_idx >= 0)
2941                         src_map = &tp->rx_jumbo_buffers[src_idx];
2942                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2943                 break;
2944
2945         default:
2946                 return -EINVAL;
2947         }
2948
2949         /* Do not overwrite any of the map or rp information
2950          * until we are sure we can commit to a new buffer.
2951          *
2952          * Callers depend upon this behavior and assume that
2953          * we leave everything unchanged if we fail.
2954          */
2955         skb = dev_alloc_skb(skb_size);
2956         if (skb == NULL)
2957                 return -ENOMEM;
2958
2959         skb->dev = tp->dev;
2960         skb_reserve(skb, tp->rx_offset);
2961
2962         mapping = pci_map_single(tp->pdev, skb->data,
2963                                  skb_size - tp->rx_offset,
2964                                  PCI_DMA_FROMDEVICE);
2965
2966         map->skb = skb;
2967         pci_unmap_addr_set(map, mapping, mapping);
2968
2969         if (src_map != NULL)
2970                 src_map->skb = NULL;
2971
2972         desc->addr_hi = ((u64)mapping >> 32);
2973         desc->addr_lo = ((u64)mapping & 0xffffffff);
2974
2975         return skb_size;
2976 }
2977
2978 /* We only need to move over in the address because the other
2979  * members of the RX descriptor are invariant.  See notes above
2980  * tg3_alloc_rx_skb for full details.
2981  */
2982 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2983                            int src_idx, u32 dest_idx_unmasked)
2984 {
2985         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2986         struct ring_info *src_map, *dest_map;
2987         int dest_idx;
2988
2989         switch (opaque_key) {
2990         case RXD_OPAQUE_RING_STD:
2991                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2992                 dest_desc = &tp->rx_std[dest_idx];
2993                 dest_map = &tp->rx_std_buffers[dest_idx];
2994                 src_desc = &tp->rx_std[src_idx];
2995                 src_map = &tp->rx_std_buffers[src_idx];
2996                 break;
2997
2998         case RXD_OPAQUE_RING_JUMBO:
2999                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3000                 dest_desc = &tp->rx_jumbo[dest_idx];
3001                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3002                 src_desc = &tp->rx_jumbo[src_idx];
3003                 src_map = &tp->rx_jumbo_buffers[src_idx];
3004                 break;
3005
3006         default:
3007                 return;
3008         }
3009
3010         dest_map->skb = src_map->skb;
3011         pci_unmap_addr_set(dest_map, mapping,
3012                            pci_unmap_addr(src_map, mapping));
3013         dest_desc->addr_hi = src_desc->addr_hi;
3014         dest_desc->addr_lo = src_desc->addr_lo;
3015
3016         src_map->skb = NULL;
3017 }
3018
3019 #if TG3_VLAN_TAG_USED
3020 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3021 {
3022         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3023 }
3024 #endif
3025
3026 /* The RX ring scheme is composed of multiple rings which post fresh
3027  * buffers to the chip, and one special ring the chip uses to report
3028  * status back to the host.
3029  *
3030  * The special ring reports the status of received packets to the
3031  * host.  The chip does not write into the original descriptor the
3032  * RX buffer was obtained from.  The chip simply takes the original
3033  * descriptor as provided by the host, updates the status and length
3034  * field, then writes this into the next status ring entry.
3035  *
3036  * Each ring the host uses to post buffers to the chip is described
3037  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3038  * it is first placed into the on-chip ram.  When the packet's length
3039  * is known, it walks down the TG3_BDINFO entries to select the ring.
3040  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
3041  * whose MAXLEN accommodates the new packet's length is chosen.
3042  *
3043  * The "separate ring for rx status" scheme may sound queer, but it makes
3044  * sense from a cache coherency perspective.  If only the host writes
3045  * to the buffer post rings, and only the chip writes to the rx status
3046  * rings, then cache lines never move beyond shared-modified state.
3047  * If both the host and chip were to write into the same ring, cache line
3048  * eviction could occur since both entities want it in an exclusive state.
3049  */
3050 static int tg3_rx(struct tg3 *tp, int budget)
3051 {
3052         u32 work_mask;
3053         u32 sw_idx = tp->rx_rcb_ptr;
3054         u16 hw_idx;
3055         int received;
3056
3057         hw_idx = tp->hw_status->idx[0].rx_producer;
3058         /*
3059          * We need to order the read of hw_idx and the read of
3060          * the opaque cookie.
3061          */
3062         rmb();
3063         work_mask = 0;
3064         received = 0;
3065         while (sw_idx != hw_idx && budget > 0) {
3066                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3067                 unsigned int len;
3068                 struct sk_buff *skb;
3069                 dma_addr_t dma_addr;
3070                 u32 opaque_key, desc_idx, *post_ptr;
3071
3072                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3073                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3074                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3075                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3076                                                   mapping);
3077                         skb = tp->rx_std_buffers[desc_idx].skb;
3078                         post_ptr = &tp->rx_std_ptr;
3079                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3080                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3081                                                   mapping);
3082                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3083                         post_ptr = &tp->rx_jumbo_ptr;
3084                 }
3085                 else {
3086                         goto next_pkt_nopost;
3087                 }
3088
3089                 work_mask |= opaque_key;
3090
3091                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3092                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3093                 drop_it:
3094                         tg3_recycle_rx(tp, opaque_key,
3095                                        desc_idx, *post_ptr);
3096                 drop_it_no_recycle:
3097                         /* Other statistics kept track of by card. */
3098                         tp->net_stats.rx_dropped++;
3099                         goto next_pkt;
3100                 }
3101
3102                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3103
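                     /* Large frames: hand the original skb up the stack and
                      * post a freshly allocated replacement buffer.  Small
                      * frames: copy into a new skb and recycle the original
                      * buffer back onto its posting ring.
                      */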
3104                 if (len > RX_COPY_THRESHOLD &&
3105                     tp->rx_offset == 2) {
3106                         /* rx_offset != 2 iff this is a 5701 card running
3107                          * in PCI-X mode [see tg3_get_invariants()]
3108                          */
3109                         int skb_size;
3110
3111                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3112                                                     desc_idx, *post_ptr);
3113                         if (skb_size < 0)
3114                                 goto drop_it;
3115
3116                         pci_unmap_single(tp->pdev, dma_addr,
3117                                          skb_size - tp->rx_offset,
3118                                          PCI_DMA_FROMDEVICE);
3119
3120                         skb_put(skb, len);
3121                 } else {
3122                         struct sk_buff *copy_skb;
3123
3124                         tg3_recycle_rx(tp, opaque_key,
3125                                        desc_idx, *post_ptr);
3126
3127                         copy_skb = dev_alloc_skb(len + 2);
3128                         if (copy_skb == NULL)
3129                                 goto drop_it_no_recycle;
3130
3131                         copy_skb->dev = tp->dev;
3132                         skb_reserve(copy_skb, 2);
3133                         skb_put(copy_skb, len);
3134                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3135                         memcpy(copy_skb->data, skb->data, len);
3136                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3137
3138                         /* We'll reuse the original ring buffer. */
3139                         skb = copy_skb;
3140                 }
3141
3142                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3143                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3144                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3145                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3146                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3147                 else
3148                         skb->ip_summed = CHECKSUM_NONE;
3149
3150                 skb->protocol = eth_type_trans(skb, tp->dev);
3151 #if TG3_VLAN_TAG_USED
3152                 if (tp->vlgrp != NULL &&
3153                     desc->type_flags & RXD_FLAG_VLAN) {
3154                         tg3_vlan_rx(tp, skb,
3155                                     desc->err_vlan & RXD_VLAN_MASK);
3156                 } else
3157 #endif
3158                         netif_receive_skb(skb);
3159
3160                 tp->dev->last_rx = jiffies;
3161                 received++;
3162                 budget--;
3163
3164 next_pkt:
3165                 (*post_ptr)++;
3166 next_pkt_nopost:
3167                 sw_idx++;
3168                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3169
3170                 /* Refresh hw_idx to see if there is new work */
3171                 if (sw_idx == hw_idx) {
3172                         hw_idx = tp->hw_status->idx[0].rx_producer;
3173                         rmb();
3174                 }
3175         }
3176
3177         /* ACK the status ring. */
3178         tp->rx_rcb_ptr = sw_idx;
3179         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3180
3181         /* Refill RX ring(s). */
3182         if (work_mask & RXD_OPAQUE_RING_STD) {
3183                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3184                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3185                              sw_idx);
3186         }
3187         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3188                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3189                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3190                              sw_idx);
3191         }
3192         mmiowb();
3193
3194         return received;
3195 }
3196
3197 static int tg3_poll(struct net_device *netdev, int *budget)
3198 {
3199         struct tg3 *tp = netdev_priv(netdev);
3200         struct tg3_hw_status *sblk = tp->hw_status;
3201         int done;
3202
3203         /* handle link change and other phy events */
3204         if (!(tp->tg3_flags &
3205               (TG3_FLAG_USE_LINKCHG_REG |
3206                TG3_FLAG_POLL_SERDES))) {
3207                 if (sblk->status & SD_STATUS_LINK_CHG) {
3208                         sblk->status = SD_STATUS_UPDATED |
3209                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3210                         spin_lock(&tp->lock);
3211                         tg3_setup_phy(tp, 0);
3212                         spin_unlock(&tp->lock);
3213                 }
3214         }
3215
3216         /* run TX completion thread */
3217         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3218                 tg3_tx(tp);
3219         }
3220
3221         /* run RX thread, within the bounds set by NAPI.
3222          * All RX "locking" is done by ensuring outside
3223          * code synchronizes with dev->poll()
3224          */
3225         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3226                 int orig_budget = *budget;
3227                 int work_done;
3228
3229                 if (orig_budget > netdev->quota)
3230                         orig_budget = netdev->quota;
3231
3232                 work_done = tg3_rx(tp, orig_budget);
3233
3234                 *budget -= work_done;
3235                 netdev->quota -= work_done;
3236         }
3237
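             /* Tagged status: remember the tag of the status block we just
              * processed.  The untagged path clears SD_STATUS_UPDATED by
              * hand instead.
              */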
3238         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3239                 tp->last_tag = sblk->status_tag;
3240                 rmb();
3241         } else
3242                 sblk->status &= ~SD_STATUS_UPDATED;
3243
3244         /* if no more work, tell net stack and NIC we're done */
3245         done = !tg3_has_work(tp);
3246         if (done) {
3247                 netif_rx_complete(netdev);
3248                 tg3_restart_ints(tp);
3249         }
3250
3251         return (done ? 0 : 1);
3252 }
3253
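     /* Keep the interrupt handlers from scheduling any more work: set
      * irq_sync (checked via tg3_irq_sync() in the handlers) and wait
      * for any handler that is already running to finish.
      */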
3254 static void tg3_irq_quiesce(struct tg3 *tp)
3255 {
3256         BUG_ON(tp->irq_sync);
3257
3258         tp->irq_sync = 1;
3259         smp_mb();
3260
3261         synchronize_irq(tp->pdev->irq);
3262 }
3263
3264 static inline int tg3_irq_sync(struct tg3 *tp)
3265 {
3266         return tp->irq_sync;
3267 }
3268
3269 /* Fully shut down all tg3 driver activity elsewhere in the system.
3270  * If irq_sync is non-zero, we must also synchronize with the IRQ
3271  * handler.  Most of the time this is only necessary when shutting
3272  * down the device.
3273  */
3274 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3275 {
3276         if (irq_sync)
3277                 tg3_irq_quiesce(tp);
3278         spin_lock_bh(&tp->lock);
3279         spin_lock(&tp->tx_lock);
3280 }
3281
3282 static inline void tg3_full_unlock(struct tg3 *tp)
3283 {
3284         spin_unlock(&tp->tx_lock);
3285         spin_unlock_bh(&tp->lock);
3286 }
3287
3288 /* MSI ISR - No need to check for interrupt sharing and no need to
3289  * flush status block and interrupt mailbox. PCI ordering rules
3290  * guarantee that MSI will arrive after the status block.
3291  */
3292 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3293 {
3294         struct net_device *dev = dev_id;
3295         struct tg3 *tp = netdev_priv(dev);
3296
3297         prefetch(tp->hw_status);
3298         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3299         /*
3300          * Writing any value to intr-mbox-0 clears PCI INTA# and
3301          * chip-internal interrupt pending events.
3302          * Writing non-zero to intr-mbox-0 additionally tells the
3303          * NIC to stop sending us irqs, engaging "in-intr-handler"
3304          * event coalescing.
3305          */
3306         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3307         if (likely(!tg3_irq_sync(tp)))
3308                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3309
3310         return IRQ_RETVAL(1);
3311 }
3312
3313 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3314 {
3315         struct net_device *dev = dev_id;
3316         struct tg3 *tp = netdev_priv(dev);
3317         struct tg3_hw_status *sblk = tp->hw_status;
3318         unsigned int handled = 1;
3319
3320         /* In INTx mode, it is possible for the interrupt to arrive at the
3321          * CPU before the status block posted prior to the interrupt is
3322          * visible in host memory.  Reading the PCI State register will
3323          * confirm whether the interrupt is ours and flush the status block.
3324          */
3325         if ((sblk->status & SD_STATUS_UPDATED) ||
3326             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3327                 /*
3328                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3329                  * chip-internal interrupt pending events.
3330                  * Writing non-zero to intr-mbox-0 additionally tells the
3331                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3332                  * event coalescing.
3333                  */
3334                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3335                              0x00000001);
3336                 if (tg3_irq_sync(tp))
3337                         goto out;
3338                 sblk->status &= ~SD_STATUS_UPDATED;
3339                 if (likely(tg3_has_work(tp))) {
3340                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3341                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3342                 } else {
3343                         /* No work, shared interrupt perhaps?  re-enable
3344                          * interrupts, and flush that PCI write
3345                          */
3346                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3347                                 0x00000000);
3348                 }
3349         } else {        /* shared interrupt */
3350                 handled = 0;
3351         }
3352 out:
3353         return IRQ_RETVAL(handled);
3354 }
3355
3356 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3357 {
3358         struct net_device *dev = dev_id;
3359         struct tg3 *tp = netdev_priv(dev);
3360         struct tg3_hw_status *sblk = tp->hw_status;
3361         unsigned int handled = 1;
3362
3363         /* In INTx mode, it is possible for the interrupt to arrive at the
3364          * CPU before the status block posted prior to the interrupt is
3365          * visible in host memory.  Reading the PCI State register will
3366          * confirm whether the interrupt is ours and flush the status block.
3367          */
3368         if ((sblk->status_tag != tp->last_tag) ||
3369             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3370                 /*
3371                  * writing any value to intr-mbox-0 clears PCI INTA# and
3372                  * chip-internal interrupt pending events.
3373                  * writing non-zero to intr-mbox-0 additionally tells the
3374                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3375                  * event coalescing.
3376                  */
3377                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3378                              0x00000001);
3379                 if (tg3_irq_sync(tp))
3380                         goto out;
3381                 if (netif_rx_schedule_prep(dev)) {
3382                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3383                         /* Update last_tag to mark that this status has been
3384                          * seen. Because interrupt may be shared, we may be
3385                          * racing with tg3_poll(), so only update last_tag
3386                          * if tg3_poll() is not scheduled.
3387                          */
3388                         tp->last_tag = sblk->status_tag;
3389                         __netif_rx_schedule(dev);
3390                 }
3391         } else {        /* shared interrupt */
3392                 handled = 0;
3393         }
3394 out:
3395         return IRQ_RETVAL(handled);
3396 }
3397
3398 /* ISR for interrupt test */
3399 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3400                 struct pt_regs *regs)
3401 {
3402         struct net_device *dev = dev_id;
3403         struct tg3 *tp = netdev_priv(dev);
3404         struct tg3_hw_status *sblk = tp->hw_status;
3405
3406         if ((sblk->status & SD_STATUS_UPDATED) ||
3407             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3408                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3409                              0x00000001);
3410                 return IRQ_RETVAL(1);
3411         }
3412         return IRQ_RETVAL(0);
3413 }
3414
3415 static int tg3_init_hw(struct tg3 *);
3416 static int tg3_halt(struct tg3 *, int, int);
3417
3418 #ifdef CONFIG_NET_POLL_CONTROLLER
3419 static void tg3_poll_controller(struct net_device *dev)
3420 {
3421         struct tg3 *tp = netdev_priv(dev);
3422
3423         tg3_interrupt(tp->pdev->irq, dev, NULL);
3424 }
3425 #endif
3426
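     /* Reset worker (scheduled via tp->reset_task, see tg3_tx_timeout()
      * below): stop the interface, re-run the hardware init under the
      * full lock, restart the interface, and re-arm the timer if a
      * restart was pending.
      */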
3427 static void tg3_reset_task(void *_data)
3428 {
3429         struct tg3 *tp = _data;
3430         unsigned int restart_timer;
3431
3432         tg3_netif_stop(tp);
3433
3434         tg3_full_lock(tp, 1);
3435
3436         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3437         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3438
3439         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3440         tg3_init_hw(tp);
3441
3442         tg3_netif_start(tp);
3443
3444         tg3_full_unlock(tp);
3445
3446         if (restart_timer)
3447                 mod_timer(&tp->timer, jiffies + 1);
3448 }
3449
3450 static void tg3_tx_timeout(struct net_device *dev)
3451 {
3452         struct tg3 *tp = netdev_priv(dev);
3453
3454         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3455                dev->name);
3456
3457         schedule_work(&tp->reset_task);
3458 }
3459
3460 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
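     /* The low 32 bits wrap iff the buffer straddles a 4GB boundary.  For
      * example, base 0xffffff00 with len 0x200 gives base + len + 8 ==
      * 0x108 after truncation, which is < base, so the test fires.  The
      * extra 8 bytes and the 0xffffdcc0 pre-check look like safety margin
      * (inferred from the constants used here, not from documentation).
      */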
3461 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3462 {
3463         u32 base = (u32) mapping & 0xffffffff;
3464
3465         return ((base > 0xffffdcc0) &&
3466                 (base + len + 8 < base));
3467 }
3468
3469 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3470
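     /* Workaround for the 4GB-boundary DMA bug: replace the descriptors
      * already queued for this skb with a single descriptor pointing at
      * a linear copy, unmapping the original fragments along the way.
      * If the copy cannot be allocated, or it still crosses a 4GB
      * boundary, the packet is dropped and -1 is returned.
      */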
3471 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3472                                        u32 last_plus_one, u32 *start,
3473                                        u32 base_flags, u32 mss)
3474 {
3475         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3476         dma_addr_t new_addr = 0;
3477         u32 entry = *start;
3478         int i, ret = 0;
3479
3480         if (!new_skb) {
3481                 ret = -1;
3482         } else {
3483                 /* New SKB is guaranteed to be linear. */
3484                 entry = *start;
3485                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3486                                           PCI_DMA_TODEVICE);
3487                 /* Make sure new skb does not cross any 4G boundaries.
3488                  * Drop the packet if it does.
3489                  */
3490                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3491                         ret = -1;
3492                         dev_kfree_skb(new_skb);
3493                         new_skb = NULL;
3494                 } else {
3495                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3496                                     base_flags, 1 | (mss << 1));
3497                         *start = NEXT_TX(entry);
3498                 }
3499         }
3500
3501         /* Now clean up the sw ring entries. */
3502         i = 0;
3503         while (entry != last_plus_one) {
3504                 int len;
3505
3506                 if (i == 0)
3507                         len = skb_headlen(skb);
3508                 else
3509                         len = skb_shinfo(skb)->frags[i-1].size;
3510                 pci_unmap_single(tp->pdev,
3511                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3512                                  len, PCI_DMA_TODEVICE);
3513                 if (i == 0) {
3514                         tp->tx_buffers[entry].skb = new_skb;
3515                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3516                 } else {
3517                         tp->tx_buffers[entry].skb = NULL;
3518                 }
3519                 entry = NEXT_TX(entry);
3520                 i++;
3521         }
3522
3523         dev_kfree_skb(skb);
3524
3525         return ret;
3526 }
3527
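     /* Fill one hardware TX descriptor.  mss_and_is_end packs the "last
      * descriptor" flag in bit 0 and the TSO MSS in the remaining bits;
      * when TXD_FLAG_VLAN is set, the VLAN tag is carried in the upper
      * 16 bits of flags.
      */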
3528 static void tg3_set_txd(struct tg3 *tp, int entry,
3529                         dma_addr_t mapping, int len, u32 flags,
3530                         u32 mss_and_is_end)
3531 {
3532         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3533         int is_end = (mss_and_is_end & 0x1);
3534         u32 mss = (mss_and_is_end >> 1);
3535         u32 vlan_tag = 0;
3536
3537         if (is_end)
3538                 flags |= TXD_FLAG_END;
3539         if (flags & TXD_FLAG_VLAN) {
3540                 vlan_tag = flags >> 16;
3541                 flags &= 0xffff;
3542         }
3543         vlan_tag |= (mss << TXD_MSS_SHIFT);
3544
3545         txd->addr_hi = ((u64) mapping >> 32);
3546         txd->addr_lo = ((u64) mapping & 0xffffffff);
3547         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3548         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3549 }
3550
3551 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3552 {
3553         struct tg3 *tp = netdev_priv(dev);
3554         dma_addr_t mapping;
3555         u32 len, entry, base_flags, mss;
3556         int would_hit_hwbug;
3557
3558         len = skb_headlen(skb);
3559
3560         /* No BH disabling for tx_lock here.  We are running in BH disabled
3561          * context and TX reclaim runs via tp->poll inside of a software
3562          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3563          * no IRQ context deadlocks to worry about either.  Rejoice!
3564          */
3565         if (!spin_trylock(&tp->tx_lock))
3566                 return NETDEV_TX_LOCKED; 
3567
3568         /* This is a hard error, log it. */
3569         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3570                 netif_stop_queue(dev);
3571                 spin_unlock(&tp->tx_lock);
3572                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3573                        dev->name);
3574                 return NETDEV_TX_BUSY;
3575         }
3576
3577         entry = tp->tx_prod;
3578         base_flags = 0;
3579         if (skb->ip_summed == CHECKSUM_HW)
3580                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
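             /* TSO setup: clear the IP header checksum, rewrite tot_len to
              * the per-segment length, compute the TCP pseudo-header
              * checksum ourselves unless the chip does HW_TSO (which wants
              * check == 0), and encode the IP/TCP header-length hints into
              * mss or base_flags depending on the chip.
              */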
3581 #if TG3_TSO_SUPPORT != 0
3582         mss = 0;
3583         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3584             (mss = skb_shinfo(skb)->tso_size) != 0) {
3585                 int tcp_opt_len, ip_tcp_len;
3586
3587                 if (skb_header_cloned(skb) &&
3588                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3589                         dev_kfree_skb(skb);
3590                         goto out_unlock;
3591                 }
3592
3593                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3594                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3595
3596                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3597                                TXD_FLAG_CPU_POST_DMA);
3598
3599                 skb->nh.iph->check = 0;
3600                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3601                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3602                         skb->h.th->check = 0;
3603                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3604                 }
3605                 else {
3606                         skb->h.th->check =
3607                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3608                                                    skb->nh.iph->daddr,
3609                                                    0, IPPROTO_TCP, 0);
3610                 }
3611
3612                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3613                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3614                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3615                                 int tsflags;
3616
3617                                 tsflags = ((skb->nh.iph->ihl - 5) +
3618                                            (tcp_opt_len >> 2));
3619                                 mss |= (tsflags << 11);
3620                         }
3621                 } else {
3622                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3623                                 int tsflags;
3624
3625                                 tsflags = ((skb->nh.iph->ihl - 5) +
3626                                            (tcp_opt_len >> 2));
3627                                 base_flags |= tsflags << 12;
3628                         }
3629                 }
3630         }
3631 #else
3632         mss = 0;
3633 #endif
3634 #if TG3_VLAN_TAG_USED
3635         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3636                 base_flags |= (TXD_FLAG_VLAN |
3637                                (vlan_tx_tag_get(skb) << 16));
3638 #endif
3639
3640         /* Queue skb data, a.k.a. the main skb fragment. */
3641         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3642
3643         tp->tx_buffers[entry].skb = skb;
3644         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3645
3646         would_hit_hwbug = 0;
3647
3648         if (tg3_4g_overflow_test(mapping, len))
3649                 would_hit_hwbug = 1;
3650
3651         tg3_set_txd(tp, entry, mapping, len, base_flags,
3652                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3653
3654         entry = NEXT_TX(entry);
3655
3656         /* Now loop through additional data fragments, and queue them. */
3657         if (skb_shinfo(skb)->nr_frags > 0) {
3658                 unsigned int i, last;
3659
3660                 last = skb_shinfo(skb)->nr_frags - 1;
3661                 for (i = 0; i <= last; i++) {
3662                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3663
3664                         len = frag->size;
3665                         mapping = pci_map_page(tp->pdev,
3666                                                frag->page,
3667                                                frag->page_offset,
3668                                                len, PCI_DMA_TODEVICE);
3669
3670                         tp->tx_buffers[entry].skb = NULL;
3671                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3672
3673                         if (tg3_4g_overflow_test(mapping, len))
3674                                 would_hit_hwbug = 1;
3675
3676                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3677                                 tg3_set_txd(tp, entry, mapping, len,
3678                                             base_flags, (i == last)|(mss << 1));
3679                         else
3680                                 tg3_set_txd(tp, entry, mapping, len,
3681                                             base_flags, (i == last));
3682
3683                         entry = NEXT_TX(entry);
3684                 }
3685         }
3686
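             /* At least one mapping fell in the 4GB-boundary danger zone;
              * redo the descriptors we just queued from a linear copy of
              * the skb (see tigon3_4gb_hwbug_workaround() above).
              */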
3687         if (would_hit_hwbug) {
3688                 u32 last_plus_one = entry;
3689                 u32 start;
3690
3691                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3692                 start &= (TG3_TX_RING_SIZE - 1);
3693
3694                 /* If the workaround fails due to memory/mapping
3695                  * failure, silently drop this packet.
3696                  */
3697                 if (tigon3_4gb_hwbug_workaround(tp, skb, last_plus_one,
3698                                                 &start, base_flags, mss))
3699                         goto out_unlock;
3700
3701                 entry = start;
3702         }
3703
3704         /* Packets are ready, update Tx producer idx local and on card. */
3705         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3706
3707         tp->tx_prod = entry;
3708         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3709                 netif_stop_queue(dev);
3710                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3711                         netif_wake_queue(tp->dev);
3712         }
3713
3714 out_unlock:
3715         mmiowb();
3716         spin_unlock(&tp->tx_lock);
3717
3718         dev->trans_start = jiffies;
3719
3720         return NETDEV_TX_OK;
3721 }
3722
3723 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3724                                int new_mtu)
3725 {
3726         dev->mtu = new_mtu;
3727
3728         if (new_mtu > ETH_DATA_LEN) {
3729                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
3730                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3731                         ethtool_op_set_tso(dev, 0);
3732                 }
3733                 else
3734                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3735         } else {
3736                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3737                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
3738                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
3739         }
3740 }
3741
3742 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3743 {
3744         struct tg3 *tp = netdev_priv(dev);
3745
3746         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3747                 return -EINVAL;
3748
3749         if (!netif_running(dev)) {
3750                 /* We'll just catch it later when the
3751                  * device is brought up.
3752                  */
3753                 tg3_set_mtu(dev, tp, new_mtu);
3754                 return 0;
3755         }
3756
3757         tg3_netif_stop(tp);
3758
3759         tg3_full_lock(tp, 1);
3760
3761         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3762
3763         tg3_set_mtu(dev, tp, new_mtu);
3764
3765         tg3_init_hw(tp);
3766
3767         tg3_netif_start(tp);
3768
3769         tg3_full_unlock(tp);
3770
3771         return 0;
3772 }
3773
3774 /* Free up pending packets in all rx/tx rings.
3775  *
3776  * The chip has been shut down and the driver detached from
3777  * the networking, so no interrupts or new tx packets will
3778  * end up in the driver.  tp->{tx,}lock is not held and we are not
3779  * in an interrupt context and thus may sleep.
3780  */
3781 static void tg3_free_rings(struct tg3 *tp)
3782 {
3783         struct ring_info *rxp;
3784         int i;
3785
3786         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3787                 rxp = &tp->rx_std_buffers[i];
3788
3789                 if (rxp->skb == NULL)
3790                         continue;
3791                 pci_unmap_single(tp->pdev,
3792                                  pci_unmap_addr(rxp, mapping),
3793                                  tp->rx_pkt_buf_sz - tp->rx_offset,
3794                                  PCI_DMA_FROMDEVICE);
3795                 dev_kfree_skb_any(rxp->skb);
3796                 rxp->skb = NULL;
3797         }
3798
3799         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3800                 rxp = &tp->rx_jumbo_buffers[i];
3801
3802                 if (rxp->skb == NULL)
3803                         continue;
3804                 pci_unmap_single(tp->pdev,
3805                                  pci_unmap_addr(rxp, mapping),
3806                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3807                                  PCI_DMA_FROMDEVICE);
3808                 dev_kfree_skb_any(rxp->skb);
3809                 rxp->skb = NULL;
3810         }
3811
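             /* i is advanced inside the loop body: each skb owns one
              * descriptor for its linear head plus one per page fragment.
              */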
3812         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3813                 struct tx_ring_info *txp;
3814                 struct sk_buff *skb;
3815                 int j;
3816
3817                 txp = &tp->tx_buffers[i];
3818                 skb = txp->skb;
3819
3820                 if (skb == NULL) {
3821                         i++;
3822                         continue;
3823                 }
3824
3825                 pci_unmap_single(tp->pdev,
3826                                  pci_unmap_addr(txp, mapping),
3827                                  skb_headlen(skb),
3828                                  PCI_DMA_TODEVICE);
3829                 txp->skb = NULL;
3830
3831                 i++;
3832
3833                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3834                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3835                         pci_unmap_page(tp->pdev,
3836                                        pci_unmap_addr(txp, mapping),
3837                                        skb_shinfo(skb)->frags[j].size,
3838                                        PCI_DMA_TODEVICE);
3839                         i++;
3840                 }
3841
3842                 dev_kfree_skb_any(skb);
3843         }
3844 }
3845
3846 /* Initialize tx/rx rings for packet processing.
3847  *
3848  * The chip has been shut down and the driver detached from
3849  * the networking, so no interrupts or new tx packets will
3850  * end up in the driver.  tp->{tx,}lock are held and thus
3851  * we may not sleep.
3852  */
3853 static void tg3_init_rings(struct tg3 *tp)
3854 {
3855         u32 i;
3856
3857         /* Free up all the SKBs. */
3858         tg3_free_rings(tp);
3859
3860         /* Zero out all descriptors. */
3861         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3862         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3863         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3864         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3865
3866         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
3867         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
3868             (tp->dev->mtu > ETH_DATA_LEN))
3869                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3870
3871         /* Initialize invariants of the rings; we only set this
3872          * stuff once.  This works because the card does not
3873          * write into the rx buffer posting rings.
3874          */
3875         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3876                 struct tg3_rx_buffer_desc *rxd;
3877
3878                 rxd = &tp->rx_std[i];
3879                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
3880                         << RXD_LEN_SHIFT;
3881                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3882                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3883                                (i << RXD_OPAQUE_INDEX_SHIFT));
3884         }
3885
3886         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3887                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3888                         struct tg3_rx_buffer_desc *rxd;
3889
3890                         rxd = &tp->rx_jumbo[i];
3891                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3892                                 << RXD_LEN_SHIFT;
3893                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3894                                 RXD_FLAG_JUMBO;
3895                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3896                                (i << RXD_OPAQUE_INDEX_SHIFT));
3897                 }
3898         }
3899
3900         /* Now allocate fresh SKBs for each rx ring. */
3901         for (i = 0; i < tp->rx_pending; i++) {
3902                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3903                                      -1, i) < 0)
3904                         break;
3905         }
3906
3907         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3908                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3909                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3910                                              -1, i) < 0)
3911                                 break;
3912                 }
3913         }
3914 }
3915
3916 /*
3917  * Must not be invoked with interrupt sources disabled and
3918  * the hardware shut down.
3919  */
3920 static void tg3_free_consistent(struct tg3 *tp)
3921 {
3922         kfree(tp->rx_std_buffers);
3923         tp->rx_std_buffers = NULL;
3924         if (tp->rx_std) {
3925                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3926                                     tp->rx_std, tp->rx_std_mapping);
3927                 tp->rx_std = NULL;
3928         }
3929         if (tp->rx_jumbo) {
3930                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3931                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3932                 tp->rx_jumbo = NULL;
3933         }
3934         if (tp->rx_rcb) {
3935                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3936                                     tp->rx_rcb, tp->rx_rcb_mapping);
3937                 tp->rx_rcb = NULL;
3938         }
3939         if (tp->tx_ring) {
3940                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3941                         tp->tx_ring, tp->tx_desc_mapping);
3942                 tp->tx_ring = NULL;
3943         }
3944         if (tp->hw_status) {
3945                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3946                                     tp->hw_status, tp->status_mapping);
3947                 tp->hw_status = NULL;
3948         }
3949         if (tp->hw_stats) {
3950                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3951                                     tp->hw_stats, tp->stats_mapping);
3952                 tp->hw_stats = NULL;
3953         }
3954 }
3955
3956 /*
3957  * Must not be invoked with interrupt sources disabled and
3958  * the hardware shut down.  Can sleep.
3959  */
3960 static int tg3_alloc_consistent(struct tg3 *tp)
3961 {
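             /* One allocation backs all three ring_info arrays (std RX,
              * jumbo RX, then TX); rx_jumbo_buffers and tx_buffers simply
              * point into it.
              */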
3962         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3963                                       (TG3_RX_RING_SIZE +
3964                                        TG3_RX_JUMBO_RING_SIZE)) +
3965                                      (sizeof(struct tx_ring_info) *
3966                                       TG3_TX_RING_SIZE),
3967                                      GFP_KERNEL);
3968         if (!tp->rx_std_buffers)
3969                 return -ENOMEM;
3970
3971         memset(tp->rx_std_buffers, 0,
3972                (sizeof(struct ring_info) *
3973                 (TG3_RX_RING_SIZE +
3974                  TG3_RX_JUMBO_RING_SIZE)) +
3975                (sizeof(struct tx_ring_info) *
3976                 TG3_TX_RING_SIZE));
3977
3978         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3979         tp->tx_buffers = (struct tx_ring_info *)
3980                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3981
3982         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3983                                           &tp->rx_std_mapping);
3984         if (!tp->rx_std)
3985                 goto err_out;
3986
3987         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3988                                             &tp->rx_jumbo_mapping);
3989
3990         if (!tp->rx_jumbo)
3991                 goto err_out;
3992
3993         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3994                                           &tp->rx_rcb_mapping);
3995         if (!tp->rx_rcb)
3996                 goto err_out;
3997
3998         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3999                                            &tp->tx_desc_mapping);
4000         if (!tp->tx_ring)
4001                 goto err_out;
4002
4003         tp->hw_status = pci_alloc_consistent(tp->pdev,
4004                                              TG3_HW_STATUS_SIZE,
4005                                              &tp->status_mapping);
4006         if (!tp->hw_status)
4007                 goto err_out;
4008
4009         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4010                                             sizeof(struct tg3_hw_stats),
4011                                             &tp->stats_mapping);
4012         if (!tp->hw_stats)
4013                 goto err_out;
4014
4015         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4016         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4017
4018         return 0;
4019
4020 err_out:
4021         tg3_free_consistent(tp);
4022         return -ENOMEM;
4023 }
4024
4025 #define MAX_WAIT_CNT 1000
4026
4027 /* To stop a block, clear the enable bit and poll till it
4028  * clears.  tp->lock is held.
4029  */
4030 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4031 {
4032         unsigned int i;
4033         u32 val;
4034
4035         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4036                 switch (ofs) {
4037                 case RCVLSC_MODE:
4038                 case DMAC_MODE:
4039                 case MBFREE_MODE:
4040                 case BUFMGR_MODE:
4041                 case MEMARB_MODE:
4042                         /* We can't enable/disable these bits of the
4043                          * 5705/5750, just say success.
4044                          */
4045                         return 0;
4046
4047                 default:
4048                         break;
4049                 }
4050         }
4051
4052         val = tr32(ofs);
4053         val &= ~enable_bit;
4054         tw32_f(ofs, val);
4055
4056         for (i = 0; i < MAX_WAIT_CNT; i++) {
4057                 udelay(100);
4058                 val = tr32(ofs);
4059                 if ((val & enable_bit) == 0)
4060                         break;
4061         }
4062
4063         if (i == MAX_WAIT_CNT && !silent) {
4064                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4065                        "ofs=%lx enable_bit=%x\n",
4066                        ofs, enable_bit);
4067                 return -ENODEV;
4068         }
4069
4070         return 0;
4071 }
4072
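     /* Quiesce the chip: disable interrupts and the RX MAC, stop the
      * receive, send and DMA state machines, wait for the TX MAC to
      * drain, then stop host coalescing, the buffer manager and the
      * memory arbiter, and finally clear the status and statistics
      * blocks.
      */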
4073 /* tp->lock is held. */
4074 static int tg3_abort_hw(struct tg3 *tp, int silent)
4075 {
4076         int i, err;
4077
4078         tg3_disable_ints(tp);
4079
4080         tp->rx_mode &= ~RX_MODE_ENABLE;
4081         tw32_f(MAC_RX_MODE, tp->rx_mode);
4082         udelay(10);
4083
4084         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4085         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4086         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4087         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4088         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4089         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4090
4091         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4092         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4093         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4094         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4095         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4096         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4097         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4098
4099         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4100         tw32_f(MAC_MODE, tp->mac_mode);
4101         udelay(40);
4102
4103         tp->tx_mode &= ~TX_MODE_ENABLE;
4104         tw32_f(MAC_TX_MODE, tp->tx_mode);
4105
4106         for (i = 0; i < MAX_WAIT_CNT; i++) {
4107                 udelay(100);
4108                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4109                         break;
4110         }
4111         if (i >= MAX_WAIT_CNT) {
4112                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4113                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4114                        tp->dev->name, tr32(MAC_TX_MODE));
4115                 err |= -ENODEV;
4116         }
4117
4118         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4119         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4120         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4121
4122         tw32(FTQ_RESET, 0xffffffff);
4123         tw32(FTQ_RESET, 0x00000000);
4124
4125         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4126         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4127
4128         if (tp->hw_status)
4129                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4130         if (tp->hw_stats)
4131                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4132
4133         return err;
4134 }
4135
4136 /* tp->lock is held. */
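     /* Request the NVRAM software arbitration grant, polling for up to
      * 8000 * 20us (~160 ms) before giving up with -ENODEV.
      */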
4137 static int tg3_nvram_lock(struct tg3 *tp)
4138 {
4139         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4140                 int i;
4141
4142                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4143                 for (i = 0; i < 8000; i++) {
4144                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4145                                 break;
4146                         udelay(20);
4147                 }
4148                 if (i == 8000)
4149                         return -ENODEV;
4150         }
4151         return 0;
4152 }
4153
4154 /* tp->lock is held. */
4155 static void tg3_nvram_unlock(struct tg3 *tp)
4156 {
4157         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4158                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4159 }
4160
4161 /* tp->lock is held. */
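/* 5750 and newer parts gate flash accesses behind an enable bit in the
 * NVRAM_ACCESS register; parts that report protected NVRAM are left
 * alone, presumably so the firmware keeps control of the interface.
 */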
4162 static void tg3_enable_nvram_access(struct tg3 *tp)
4163 {
4164         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4165             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4166                 u32 nvaccess = tr32(NVRAM_ACCESS);
4167
4168                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4169         }
4170 }
4171
4172 /* tp->lock is held. */
4173 static void tg3_disable_nvram_access(struct tg3 *tp)
4174 {
4175         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4176             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4177                 u32 nvaccess = tr32(NVRAM_ACCESS);
4178
4179                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4180         }
4181 }
4182
4183 /* tp->lock is held. */
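/* Tell the bootcode/ASF firmware that a driver-initiated reset is
 * coming: write the magic value into the firmware mailbox and, when
 * the newer ASF handshake is in use, record the driver state
 * (start/unload/suspend) that is about to be entered.
 */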
4184 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4185 {
4186         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4187                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4188                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4189
4190         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4191                 switch (kind) {
4192                 case RESET_KIND_INIT:
4193                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4194                                       DRV_STATE_START);
4195                         break;
4196
4197                 case RESET_KIND_SHUTDOWN:
4198                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4199                                       DRV_STATE_UNLOAD);
4200                         break;
4201
4202                 case RESET_KIND_SUSPEND:
4203                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4204                                       DRV_STATE_SUSPEND);
4205                         break;
4206
4207                 default:
4208                         break;
4209                 }
4210         }
4211 }
4212
4213 /* tp->lock is held. */
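/* Companion to tg3_write_sig_pre_reset(): with the new ASF handshake,
 * report that the state transition announced before the reset has now
 * completed.
 */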
4214 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4215 {
4216         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4217                 switch (kind) {
4218                 case RESET_KIND_INIT:
4219                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4220                                       DRV_STATE_START_DONE);
4221                         break;
4222
4223                 case RESET_KIND_SHUTDOWN:
4224                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4225                                       DRV_STATE_UNLOAD_DONE);
4226                         break;
4227
4228                 default:
4229                         break;
4230                 }
4231         }
4232 }
4233
4234 /* tp->lock is held. */
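/* Same driver-state reporting for older ASF firmware that does not
 * speak the new handshake; only written when ASF is enabled at all.
 */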
4235 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4236 {
4237         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4238                 switch (kind) {
4239                 case RESET_KIND_INIT:
4240                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4241                                       DRV_STATE_START);
4242                         break;
4243
4244                 case RESET_KIND_SHUTDOWN:
4245                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4246                                       DRV_STATE_UNLOAD);
4247                         break;
4248
4249                 case RESET_KIND_SUSPEND:
4250                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4251                                       DRV_STATE_SUSPEND);
4252                         break;
4253
4254                 default:
4255                         break;
4256                 }
4257         }
4258 }
4259
4260 static void tg3_stop_fw(struct tg3 *);
4261
4262 /* tp->lock is held. */
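/* Issue a GRC core-clock reset and bring the chip back to a sane
 * post-reset state: restore indirect register access and the PCI
 * settings, re-enable MSI on 5780-class parts, and wait for the
 * bootcode to signal completion via the firmware mailbox before
 * reprobing the ASF enable state.
 */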
4263 static int tg3_chip_reset(struct tg3 *tp)
4264 {
4265         u32 val;
4266         void (*write_op)(struct tg3 *, u32, u32);
4267         int i;
4268
4269         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4270                 tg3_nvram_lock(tp);
4271
4272         /*
4273          * We must avoid the readl() that normally takes place.
4274          * It locks machines, causes machine checks, and other
4275          * fun things.  So, temporarily disable the 5701
4276          * hardware workaround, while we do the reset.
4277          */
4278         write_op = tp->write32;
4279         if (write_op == tg3_write_flush_reg32)
4280                 tp->write32 = tg3_write32;
4281
4282         /* do the reset */
4283         val = GRC_MISC_CFG_CORECLK_RESET;
4284
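        /* PCI Express parts need extra, mostly undocumented reset
         * workarounds: register 0x7e2c is rewritten if it reads 0x60,
         * and post-A0 5750 chips get bit 29 of GRC_MISC_CFG set both
         * here and in the reset value written below.
         */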
4285         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4286                 if (tr32(0x7e2c) == 0x60) {
4287                         tw32(0x7e2c, 0x20);
4288                 }
4289                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4290                         tw32(GRC_MISC_CFG, (1 << 29));
4291                         val |= (1 << 29);
4292                 }
4293         }
4294
4295         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4296                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4297         tw32(GRC_MISC_CFG, val);
4298
4299         /* restore 5701 hardware bug workaround write method */
4300         tp->write32 = write_op;
4301
4302         /* Unfortunately, we have to delay before the PCI read back.
4303          * Some 575X chips will not even respond to a PCI cfg access
4304          * when the reset command is given to the chip.
4305          *
4306          * How do these hardware designers expect things to work
4307          * properly if the PCI write is posted for a long period
4308          * of time?  It is always necessary to have some method by
4309          * which a register read back can occur to push the write
4310          * out which does the reset.
4311          *
4312          * For most tg3 variants the trick below has worked.
4313          * Ho hum...
4314          */
4315         udelay(120);
4316
4317         /* Flush PCI posted writes.  The normal MMIO registers
4318          * are inaccessible at this time so this is the only
4319          * way to do this reliably (actually, this is no longer
4320          * the case, see above).  I tried to use indirect
4321          * register read/write but this upset some 5701 variants.
4322          */
4323         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4324
4325         udelay(120);
4326
4327         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4328                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4329                         int i;
4330                         u32 cfg_val;
4331
4332                         /* Wait for link training to complete.  */
4333                         for (i = 0; i < 5000; i++)
4334                                 udelay(100);
4335
4336                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4337                         pci_write_config_dword(tp->pdev, 0xc4,
4338                                                cfg_val | (1 << 15));
4339                 }
4340                 /* Set PCIE max payload size and clear error status.  */
4341                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4342         }
4343
4344         /* Re-enable indirect register accesses. */
4345         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4346                                tp->misc_host_ctrl);
4347
4348         /* Set MAX PCI retry to zero. */
4349         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4350         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4351             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4352                 val |= PCISTATE_RETRY_SAME_DMA;
4353         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4354
4355         pci_restore_state(tp->pdev);
4356
4357         /* Make sure PCI-X relaxed ordering bit is clear. */
4358         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4359         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4360         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4361
4362         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4363                 u32 val;
4364
4365                 /* Chip reset on 5780 will reset MSI enable bit,
4366                  * so need to restore it.
4367                  */
4368                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4369                         u16 ctrl;
4370
4371                         pci_read_config_word(tp->pdev,
4372                                              tp->msi_cap + PCI_MSI_FLAGS,
4373                                              &ctrl);
4374                         pci_write_config_word(tp->pdev,
4375                                               tp->msi_cap + PCI_MSI_FLAGS,
4376                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4377                         val = tr32(MSGINT_MODE);
4378                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4379                 }
4380
4381                 val = tr32(MEMARB_MODE);
4382                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4383
4384         } else
4385                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4386
4387         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4388                 tg3_stop_fw(tp);
4389                 tw32(0x5000, 0x400);
4390         }
4391
4392         tw32(GRC_MODE, tp->grc_mode);
4393
4394         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4395                 u32 val = tr32(0xc4);
4396
4397                 tw32(0xc4, val | (1 << 15));
4398         }
4399
4400         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4401             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4402                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4403                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4404                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4405                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4406         }
4407
4408         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4409                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4410                 tw32_f(MAC_MODE, tp->mac_mode);
4411         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4412                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4413                 tw32_f(MAC_MODE, tp->mac_mode);
4414         } else
4415                 tw32_f(MAC_MODE, 0);
4416         udelay(40);
4417
4418         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4419                 /* Wait for firmware initialization to complete. */
4420                 for (i = 0; i < 100000; i++) {
4421                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4422                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4423                                 break;
4424                         udelay(10);
4425                 }
4426                 if (i >= 100000) {
4427                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4428                                "firmware will not restart magic=%08x\n",
4429                                tp->dev->name, val);
4430                         return -ENODEV;
4431                 }
4432         }
4433
4434         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4435             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4436                 u32 val = tr32(0x7c00);
4437
4438                 tw32(0x7c00, val | (1 << 25));
4439         }
4440
4441         /* Reprobe ASF enable state.  */
4442         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4443         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4444         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4445         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4446                 u32 nic_cfg;
4447
4448                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4449                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4450                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4451                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4452                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4453                 }
4454         }
4455
4456         return 0;
4457 }
4458
4459 /* tp->lock is held. */
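/* Ask the ASF management firmware to pause: post FWCMD_NICDRV_PAUSE_FW
 * in the firmware command mailbox, raise the RX CPU event bit, and
 * give the firmware a short window to acknowledge by clearing it.
 */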
4460 static void tg3_stop_fw(struct tg3 *tp)
4461 {
4462         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4463                 u32 val;
4464                 int i;
4465
4466                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4467                 val = tr32(GRC_RX_CPU_EVENT);
4468                 val |= (1 << 14);
4469                 tw32(GRC_RX_CPU_EVENT, val);
4470
4471                 /* Wait for RX cpu to ACK the event.  */
4472                 for (i = 0; i < 100; i++) {
4473                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4474                                 break;
4475                         udelay(1);
4476                 }
4477         }
4478 }
4479
4480 /* tp->lock is held. */
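/* Full controller shutdown: pause the management firmware, signal the
 * upcoming state change, quiesce the MAC/DMA blocks and reset the
 * chip, then report the completed transition to the firmware.
 */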
4481 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4482 {
4483         int err;
4484
4485         tg3_stop_fw(tp);
4486
4487         tg3_write_sig_pre_reset(tp, kind);
4488
4489         tg3_abort_hw(tp, silent);
4490         err = tg3_chip_reset(tp);
4491
4492         tg3_write_sig_legacy(tp, kind);
4493         tg3_write_sig_post_reset(tp, kind);
4494
4495         if (err)
4496                 return err;
4497
4498         return 0;
4499 }
4500
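/* Replacement firmware image for the 5701 A0 RX CPU, used by
 * tg3_load_5701_a0_firmware_fix() below.  The defines give the link
 * addresses and section sizes; the arrays hold the raw text and rodata
 * words (the data section is all zeros and therefore omitted).
 */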
4501 #define TG3_FW_RELEASE_MAJOR    0x0
4502 #define TG3_FW_RELEASE_MINOR    0x0
4503 #define TG3_FW_RELEASE_FIX      0x0
4504 #define TG3_FW_START_ADDR       0x08000000
4505 #define TG3_FW_TEXT_ADDR        0x08000000
4506 #define TG3_FW_TEXT_LEN         0x9c0
4507 #define TG3_FW_RODATA_ADDR      0x080009c0
4508 #define TG3_FW_RODATA_LEN       0x60
4509 #define TG3_FW_DATA_ADDR        0x08000a40
4510 #define TG3_FW_DATA_LEN         0x20
4511 #define TG3_FW_SBSS_ADDR        0x08000a60
4512 #define TG3_FW_SBSS_LEN         0xc
4513 #define TG3_FW_BSS_ADDR         0x08000a70
4514 #define TG3_FW_BSS_LEN          0x10
4515
4516 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4517         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4518         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4519         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4520         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4521         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4522         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4523         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4524         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4525         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4526         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4527         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4528         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4529         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4530         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4531         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4532         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4533         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4534         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4535         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4536         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4537         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4538         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4539         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4540         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4541         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4542         0, 0, 0, 0, 0, 0,
4543         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4544         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4545         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4546         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4547         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4548         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4549         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4550         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4551         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4552         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4553         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4554         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4555         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4556         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4557         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4558         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4559         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4560         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4561         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4562         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4563         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4564         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4565         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4566         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4567         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4568         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4569         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4570         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4571         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4572         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4573         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4574         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4575         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4576         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4577         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4578         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4579         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4580         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4581         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4582         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4583         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4584         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4585         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4586         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4587         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4588         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4589         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4590         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4591         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4592         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4593         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4594         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4595         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4596         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4597         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4598         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4599         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4600         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4601         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4602         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4603         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4604         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4605         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4606         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4607         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4608 };
4609
4610 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4611         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4612         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4613         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4614         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4615         0x00000000
4616 };
4617
4618 #if 0 /* All zeros, don't eat up space with it. */
4619 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4620         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4621         0x00000000, 0x00000000, 0x00000000, 0x00000000
4622 };
4623 #endif
4624
4625 #define RX_CPU_SCRATCH_BASE     0x30000
4626 #define RX_CPU_SCRATCH_SIZE     0x04000
4627 #define TX_CPU_SCRATCH_BASE     0x34000
4628 #define TX_CPU_SCRATCH_SIZE     0x04000
4629
4630 /* tp->lock is held. */
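/* Halt the RX or TX embedded CPU by repeatedly asserting CPU_MODE_HALT
 * until it sticks; the RX CPU additionally gets a final forced halt.
 * 5705-class parts have no separate TX CPU firmware, hence the BUG()
 * on that combination.
 */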
4631 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4632 {
4633         int i;
4634
4635         if (offset == TX_CPU_BASE &&
4636             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4637                 BUG();
4638
4639         if (offset == RX_CPU_BASE) {
4640                 for (i = 0; i < 10000; i++) {
4641                         tw32(offset + CPU_STATE, 0xffffffff);
4642                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4643                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4644                                 break;
4645                 }
4646
4647                 tw32(offset + CPU_STATE, 0xffffffff);
4648                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4649                 udelay(10);
4650         } else {
4651                 for (i = 0; i < 10000; i++) {
4652                         tw32(offset + CPU_STATE, 0xffffffff);
4653                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4654                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4655                                 break;
4656                 }
4657         }
4658
4659         if (i >= 10000) {
4660                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4661                        "and %s CPU\n",
4662                        tp->dev->name,
4663                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4664                 return -ENODEV;
4665         }
4666         return 0;
4667 }
4668
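/* Describes one firmware image: load address, length and payload for
 * each of the text, rodata and data sections.  A NULL payload means
 * the section should be filled with zeros.
 */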
4669 struct fw_info {
4670         unsigned int text_base;
4671         unsigned int text_len;
4672         u32 *text_data;
4673         unsigned int rodata_base;
4674         unsigned int rodata_len;
4675         u32 *rodata_data;
4676         unsigned int data_base;
4677         unsigned int data_len;
4678         u32 *data_data;
4679 };
4680
4681 /* tp->lock is held. */
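/* Copy a firmware image into the given CPU's scratch memory while the
 * CPU is halted.  The NVRAM lock is taken around the halt because the
 * bootcode may still be running from NVRAM at this point; the caller
 * is responsible for pointing the CPU's PC at the image and releasing
 * the halt afterwards.
 */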
4682 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4683                                  int cpu_scratch_size, struct fw_info *info)
4684 {
4685         int err, i;
4686         void (*write_op)(struct tg3 *, u32, u32);
4687
4688         if (cpu_base == TX_CPU_BASE &&
4689             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4690                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4691                        "TX cpu firmware on %s which is 5705.\n",
4692                        tp->dev->name);
4693                 return -EINVAL;
4694         }
4695
4696         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4697                 write_op = tg3_write_mem;
4698         else
4699                 write_op = tg3_write_indirect_reg32;
4700
4701         /* It is possible that bootcode is still loading at this point.
4702          * Get the nvram lock first before halting the cpu.
4703          */
4704         tg3_nvram_lock(tp);
4705         err = tg3_halt_cpu(tp, cpu_base);
4706         tg3_nvram_unlock(tp);
4707         if (err)
4708                 goto out;
4709
4710         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4711                 write_op(tp, cpu_scratch_base + i, 0);
4712         tw32(cpu_base + CPU_STATE, 0xffffffff);
4713         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
4714         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4715                 write_op(tp, (cpu_scratch_base +
4716                               (info->text_base & 0xffff) +
4717                               (i * sizeof(u32))),
4718                          (info->text_data ?
4719                           info->text_data[i] : 0));
4720         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4721                 write_op(tp, (cpu_scratch_base +
4722                               (info->rodata_base & 0xffff) +
4723                               (i * sizeof(u32))),
4724                          (info->rodata_data ?
4725                           info->rodata_data[i] : 0));
4726         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4727                 write_op(tp, (cpu_scratch_base +
4728                               (info->data_base & 0xffff) +
4729                               (i * sizeof(u32))),
4730                          (info->data_data ?
4731                           info->data_data[i] : 0));
4732
4733         err = 0;
4734
4735 out:
4736         return err;
4737 }
4738
4739 /* tp->lock is held. */
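/* Load the 5701 A0 fix image defined above into both CPUs' scratch
 * areas, then start only the RX CPU at TG3_FW_TEXT_ADDR and verify
 * that its program counter actually took the new value.
 */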
4740 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4741 {
4742         struct fw_info info;
4743         int err, i;
4744
4745         info.text_base = TG3_FW_TEXT_ADDR;
4746         info.text_len = TG3_FW_TEXT_LEN;
4747         info.text_data = &tg3FwText[0];
4748         info.rodata_base = TG3_FW_RODATA_ADDR;
4749         info.rodata_len = TG3_FW_RODATA_LEN;
4750         info.rodata_data = &tg3FwRodata[0];
4751         info.data_base = TG3_FW_DATA_ADDR;
4752         info.data_len = TG3_FW_DATA_LEN;
4753         info.data_data = NULL;
4754
4755         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4756                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4757                                     &info);
4758         if (err)
4759                 return err;
4760
4761         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4762                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4763                                     &info);
4764         if (err)
4765                 return err;
4766
4767         /* Now startup only the RX cpu. */
4768         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4769         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4770
4771         for (i = 0; i < 5; i++) {
4772                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4773                         break;
4774                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4775                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4776                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4777                 udelay(1000);
4778         }
4779         if (i >= 5) {
4780                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4781                        "to set RX CPU PC, is %08x should be %08x\n",
4782                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4783                        TG3_FW_TEXT_ADDR);
4784                 return -ENODEV;
4785         }
4786         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4787         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4788
4789         return 0;
4790 }
4791
4792 #if TG3_TSO_SUPPORT != 0
4793
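/* Firmware image implementing TSO offload for 5700-series parts, laid
 * out the same way as the 5701 A0 fix above: link addresses, section
 * lengths, and the raw text/rodata/data words.
 */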
4794 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4795 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4796 #define TG3_TSO_FW_RELEASE_FIX          0x0
4797 #define TG3_TSO_FW_START_ADDR           0x08000000
4798 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4799 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4800 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4801 #define TG3_TSO_FW_RODATA_LEN           0x60
4802 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4803 #define TG3_TSO_FW_DATA_LEN             0x30
4804 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4805 #define TG3_TSO_FW_SBSS_LEN             0x2c
4806 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4807 #define TG3_TSO_FW_BSS_LEN              0x894
4808
4809 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4810         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4811         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4812         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4813         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4814         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4815         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4816         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4817         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4818         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4819         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4820         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4821         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4822         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4823         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4824         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4825         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4826         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4827         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4828         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4829         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4830         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4831         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4832         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4833         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4834         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4835         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4836         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4837         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4838         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4839         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4840         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4841         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4842         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4843         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4844         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4845         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4846         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4847         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4848         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4849         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4850         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4851         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4852         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4853         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4854         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4855         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4856         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4857         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4858         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4859         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4860         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4861         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4862         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4863         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4864         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4865         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4866         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4867         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4868         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4869         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4870         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4871         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4872         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4873         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4874         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4875         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4876         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4877         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4878         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4879         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4880         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4881         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4882         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4883         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4884         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4885         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4886         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4887         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4888         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4889         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4890         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4891         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4892         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4893         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4894         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4895         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4896         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4897         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4898         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4899         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4900         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4901         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4902         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4903         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4904         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4905         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4906         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4907         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4908         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4909         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4910         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4911         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4912         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4913         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4914         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4915         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4916         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4917         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4918         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4919         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4920         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4921         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4922         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4923         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4924         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4925         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4926         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4927         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4928         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4929         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4930         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4931         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4932         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4933         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4934         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4935         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4936         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4937         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4938         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4939         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4940         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4941         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4942         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4943         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4944         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4945         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4946         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4947         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4948         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4949         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4950         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4951         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4952         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4953         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4954         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4955         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4956         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4957         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4958         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4959         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4960         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4961         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4962         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4963         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4964         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4965         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4966         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4967         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4968         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4969         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4970         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4971         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4972         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4973         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4974         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4975         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4976         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4977         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4978         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4979         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4980         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4981         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4982         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4983         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4984         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4985         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4986         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4987         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4988         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4989         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4990         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4991         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4992         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4993         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4994         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4995         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4996         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4997         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4998         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4999         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5000         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5001         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5002         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5003         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5004         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5005         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5006         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5007         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5008         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5009         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5010         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5011         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5012         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5013         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5014         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5015         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5016         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5017         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5018         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5019         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5020         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5021         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5022         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5023         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5024         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5025         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5026         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5027         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5028         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5029         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5030         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5031         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5032         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5033         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5034         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5035         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5036         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5037         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5038         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5039         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5040         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5041         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5042         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5043         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5044         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5045         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5046         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5047         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5048         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5049         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5050         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5051         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5052         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5053         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5054         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5055         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5056         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5057         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5058         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5059         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5060         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5061         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5062         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5063         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5064         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5065         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5066         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5067         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5068         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5069         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5070         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5071         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5072         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5073         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5074         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5075         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5076         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5077         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5078         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5079         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5080         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5081         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5082         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5083         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5084         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5085         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5086         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5087         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5088         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5089         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5090         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5091         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5092         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5093         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5094 };
5095
5096 static u32 tg3TsoFwRodata[] = {
5097         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5098         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5099         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5100         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5101         0x00000000,
5102 };
5103
5104 static u32 tg3TsoFwData[] = {
5105         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5106         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5107         0x00000000,
5108 };
5109
5110 /* 5705 needs a special version of the TSO firmware.  */
5111 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5112 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
5113 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5114 #define TG3_TSO5_FW_START_ADDR          0x00010000
5115 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5116 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5117 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5118 #define TG3_TSO5_FW_RODATA_LEN          0x50
5119 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5120 #define TG3_TSO5_FW_DATA_LEN            0x20
5121 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5122 #define TG3_TSO5_FW_SBSS_LEN            0x28
5123 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5124 #define TG3_TSO5_FW_BSS_LEN             0x88
5125
5126 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5127         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5128         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5129         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5130         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5131         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5132         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5133         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5134         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5135         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5136         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5137         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5138         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5139         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5140         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5141         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5142         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5143         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5144         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5145         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5146         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5147         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5148         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5149         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5150         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5151         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5152         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5153         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5154         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5155         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5156         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5157         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5158         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5159         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5160         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5161         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5162         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5163         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5164         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5165         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5166         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5167         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5168         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5169         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5170         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5171         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5172         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5173         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5174         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5175         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5176         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5177         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5178         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5179         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5180         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5181         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5182         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5183         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5184         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5185         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5186         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5187         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5188         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5189         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5190         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5191         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5192         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5193         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5194         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5195         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5196         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5197         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5198         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5199         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5200         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5201         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5202         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5203         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5204         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5205         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5206         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5207         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5208         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5209         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5210         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5211         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5212         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5213         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5214         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5215         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5216         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5217         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5218         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5219         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5220         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5221         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5222         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5223         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5224         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5225         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5226         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5227         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5228         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5229         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5230         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5231         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5232         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5233         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5234         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5235         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5236         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5237         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5238         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5239         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5240         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5241         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5242         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5243         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5244         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5245         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5246         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5247         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5248         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5249         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5250         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5251         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5252         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5253         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5254         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5255         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5256         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5257         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5258         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5259         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5260         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5261         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5262         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5263         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5264         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5265         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5266         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5267         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5268         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5269         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5270         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5271         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5272         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5273         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5274         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5275         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5276         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5277         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5278         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5279         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5280         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5281         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5282         0x00000000, 0x00000000, 0x00000000,
5283 };
5284
5285 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5286         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5287         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5288         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5289         0x00000000, 0x00000000, 0x00000000,
5290 };
5291
5292 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5293         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5294         0x00000000, 0x00000000, 0x00000000,
5295 };
5296
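/* Load the appropriate TSO firmware image into on-chip CPU scratch
 * memory and start that CPU at the firmware entry point.  The 5705
 * runs the firmware on the RX CPU and carves its scratch area out of
 * the mbuf pool SRAM; other TSO-capable chips use the TX CPU scratch
 * area.  Chips with hardware TSO (TG3_FLG2_HW_TSO) need no firmware.
 */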
5297 /* tp->lock is held. */
5298 static int tg3_load_tso_firmware(struct tg3 *tp)
5299 {
5300         struct fw_info info;
5301         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5302         int err, i;
5303
5304         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5305                 return 0;
5306
5307         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5308                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5309                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5310                 info.text_data = &tg3Tso5FwText[0];
5311                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5312                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5313                 info.rodata_data = &tg3Tso5FwRodata[0];
5314                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5315                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5316                 info.data_data = &tg3Tso5FwData[0];
5317                 cpu_base = RX_CPU_BASE;
5318                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5319                 cpu_scratch_size = (info.text_len +
5320                                     info.rodata_len +
5321                                     info.data_len +
5322                                     TG3_TSO5_FW_SBSS_LEN +
5323                                     TG3_TSO5_FW_BSS_LEN);
5324         } else {
5325                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5326                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5327                 info.text_data = &tg3TsoFwText[0];
5328                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5329                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5330                 info.rodata_data = &tg3TsoFwRodata[0];
5331                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5332                 info.data_len = TG3_TSO_FW_DATA_LEN;
5333                 info.data_data = &tg3TsoFwData[0];
5334                 cpu_base = TX_CPU_BASE;
5335                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5336                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5337         }
5338
5339         err = tg3_load_firmware_cpu(tp, cpu_base,
5340                                     cpu_scratch_base, cpu_scratch_size,
5341                                     &info);
5342         if (err)
5343                 return err;
5344
5345         /* Now start up the CPU. */
5346         tw32(cpu_base + CPU_STATE, 0xffffffff);
5347         tw32_f(cpu_base + CPU_PC,    info.text_base);
5348
5349         for (i = 0; i < 5; i++) {
5350                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5351                         break;
5352                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5353                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5354                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5355                 udelay(1000);
5356         }
5357         if (i >= 5) {
5358                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set "
5359                        "CPU PC for %s: is %08x, should be %08x\n",
5360                        tp->dev->name, tr32(cpu_base + CPU_PC),
5361                        info.text_base);
5362                 return -ENODEV;
5363         }
5364         tw32(cpu_base + CPU_STATE, 0xffffffff);
5365         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5366         return 0;
5367 }
5368
5369 #endif /* TG3_TSO_SUPPORT != 0 */
5370
5371 /* tp->lock is held. */
5372 static void __tg3_set_mac_addr(struct tg3 *tp)
5373 {
5374         u32 addr_high, addr_low;
5375         int i;
5376
5377         addr_high = ((tp->dev->dev_addr[0] << 8) |
5378                      tp->dev->dev_addr[1]);
5379         addr_low = ((tp->dev->dev_addr[2] << 24) |
5380                     (tp->dev->dev_addr[3] << 16) |
5381                     (tp->dev->dev_addr[4] <<  8) |
5382                     (tp->dev->dev_addr[5] <<  0));
5383         for (i = 0; i < 4; i++) {
5384                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5385                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5386         }
5387
5388         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5389             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5390                 for (i = 0; i < 12; i++) {
5391                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5392                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5393                 }
5394         }
5395
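        /* Seed the transmit backoff generator from the byte sum of the MAC
         * address, so that different NICs on the same segment tend to pick
         * different random backoff slots.
         */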
5396         addr_high = (tp->dev->dev_addr[0] +
5397                      tp->dev->dev_addr[1] +
5398                      tp->dev->dev_addr[2] +
5399                      tp->dev->dev_addr[3] +
5400                      tp->dev->dev_addr[4] +
5401                      tp->dev->dev_addr[5]) &
5402                 TX_BACKOFF_SEED_MASK;
5403         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5404 }
5405
5406 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5407 {
5408         struct tg3 *tp = netdev_priv(dev);
5409         struct sockaddr *addr = p;
5410
5411         if (!is_valid_ether_addr(addr->sa_data))
5412                 return -EINVAL;
5413
5414         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5415
5416         spin_lock_bh(&tp->lock);
5417         __tg3_set_mac_addr(tp);
5418         spin_unlock_bh(&tp->lock);
5419
5420         return 0;
5421 }
5422
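/* Write one TG3_BDINFO ring control block into NIC SRAM: the 64-bit host
 * DMA address of the ring, its (max buffer length << 16) | flags word
 * and, on pre-5705 chips, the address of the descriptors in NIC memory.
 */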
5423 /* tp->lock is held. */
5424 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5425                            dma_addr_t mapping, u32 maxlen_flags,
5426                            u32 nic_addr)
5427 {
5428         tg3_write_mem(tp,
5429                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5430                       ((u64) mapping >> 32));
5431         tg3_write_mem(tp,
5432                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5433                       ((u64) mapping & 0xffffffff));
5434         tg3_write_mem(tp,
5435                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5436                        maxlen_flags);
5437
5438         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5439                 tg3_write_mem(tp,
5440                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5441                               nic_addr);
5442 }
5443
5444 static void __tg3_set_rx_mode(struct net_device *);
5445 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5446 {
5447         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5448         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5449         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5450         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5451         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5452                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5453                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5454         }
5455         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5456         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5457         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5458                 u32 val = ec->stats_block_coalesce_usecs;
5459
5460                 if (!netif_carrier_ok(tp->dev))
5461                         val = 0;
5462
5463                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5464         }
5465 }
5466
5467 /* tp->lock is held. */
5468 static int tg3_reset_hw(struct tg3 *tp)
5469 {
5470         u32 val, rdmac_mode;
5471         int i, err, limit;
5472
5473         tg3_disable_ints(tp);
5474
5475         tg3_stop_fw(tp);
5476
5477         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5478
5479         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5480                 tg3_abort_hw(tp, 1);
5481         }
5482
5483         err = tg3_chip_reset(tp);
5484         if (err)
5485                 return err;
5486
5487         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5488
5489         /* This works around an issue with Athlon chipsets on
5490          * B3 tigon3 silicon.  This bit has no effect on any
5491          * other revision.  But do not set this on PCI Express
5492          * chips.
5493          */
5494         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5495                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5496         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5497
5498         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5499             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5500                 val = tr32(TG3PCI_PCISTATE);
5501                 val |= PCISTATE_RETRY_SAME_DMA;
5502                 tw32(TG3PCI_PCISTATE, val);
5503         }
5504
5505         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5506                 /* Enable some hw fixes.  */
5507                 val = tr32(TG3PCI_MSI_DATA);
5508                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5509                 tw32(TG3PCI_MSI_DATA, val);
5510         }
5511
5512         /* Descriptor ring init may make accesses to the
5513          * NIC SRAM area to setup the TX descriptors, so we
5514          * can only do this after the hardware has been
5515          * successfully reset.
5516          */
5517         tg3_init_rings(tp);
5518
5519         /* This value is determined during the probe time DMA
5520          * engine test, tg3_test_dma.
5521          */
5522         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5523
5524         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5525                           GRC_MODE_4X_NIC_SEND_RINGS |
5526                           GRC_MODE_NO_TX_PHDR_CSUM |
5527                           GRC_MODE_NO_RX_PHDR_CSUM);
5528         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5529         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5530                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5531         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5532                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5533
5534         tw32(GRC_MODE,
5535              tp->grc_mode |
5536              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5537
5538         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
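        /* (A prescaler value of 65 presumably means divide-by-66, i.e. a
         *  1 us timer tick from the 66 MHz core clock.)
         */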
5539         val = tr32(GRC_MISC_CFG);
5540         val &= ~0xff;
5541         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5542         tw32(GRC_MISC_CFG, val);
5543
5544         /* Initialize MBUF/DESC pool. */
5545         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5546                 /* Do nothing.  */
5547         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5548                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5549                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5550                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5551                 else
5552                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5553                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5554                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5555         }
5556 #if TG3_TSO_SUPPORT != 0
5557         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5558                 int fw_len;
5559
5560                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5561                           TG3_TSO5_FW_RODATA_LEN +
5562                           TG3_TSO5_FW_DATA_LEN +
5563                           TG3_TSO5_FW_SBSS_LEN +
5564                           TG3_TSO5_FW_BSS_LEN);
5565                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5566                 tw32(BUFMGR_MB_POOL_ADDR,
5567                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5568                 tw32(BUFMGR_MB_POOL_SIZE,
5569                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5570         }
5571 #endif
5572
5573         if (tp->dev->mtu <= ETH_DATA_LEN) {
5574                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5575                      tp->bufmgr_config.mbuf_read_dma_low_water);
5576                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5577                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5578                 tw32(BUFMGR_MB_HIGH_WATER,
5579                      tp->bufmgr_config.mbuf_high_water);
5580         } else {
5581                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5582                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5583                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5584                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5585                 tw32(BUFMGR_MB_HIGH_WATER,
5586                      tp->bufmgr_config.mbuf_high_water_jumbo);
5587         }
5588         tw32(BUFMGR_DMA_LOW_WATER,
5589              tp->bufmgr_config.dma_low_water);
5590         tw32(BUFMGR_DMA_HIGH_WATER,
5591              tp->bufmgr_config.dma_high_water);
5592
5593         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5594         for (i = 0; i < 2000; i++) {
5595                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5596                         break;
5597                 udelay(10);
5598         }
5599         if (i >= 2000) {
5600                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5601                        tp->dev->name);
5602                 return -ENODEV;
5603         }
5604
5605         /* Setup replenish threshold. */
5606         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5607
5608         /* Initialize the TG3_BDINFOs at:
5609          *  RCVDBDI_STD_BD:     standard eth size rx ring
5610          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5611          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5612          *
5613          * like so:
5614          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5615          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5616          *                              ring attribute flags
5617          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5618          *
5619          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5620          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5621          *
5622          * The size of each ring is fixed in the firmware, but the location is
5623          * configurable.
5624          */
5625         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5626              ((u64) tp->rx_std_mapping >> 32));
5627         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5628              ((u64) tp->rx_std_mapping & 0xffffffff));
5629         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5630              NIC_SRAM_RX_BUFFER_DESC);
5631
5632         /* Don't even try to program the JUMBO/MINI buffer descriptor
5633          * configs on 5705.
5634          */
5635         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5636                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5637                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5638         } else {
5639                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5640                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5641
5642                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5643                      BDINFO_FLAGS_DISABLED);
5644
5645                 /* Setup replenish threshold. */
5646                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5647
5648                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5649                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5650                              ((u64) tp->rx_jumbo_mapping >> 32));
5651                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5652                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5653                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5654                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5655                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5656                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5657                 } else {
5658                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5659                              BDINFO_FLAGS_DISABLED);
5660                 }
5661
5662         }
5663
5664         /* There is only one send ring on 5705/5750, no need to explicitly
5665          * disable the others.
5666          */
5667         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5668                 /* Clear out send RCB ring in SRAM. */
5669                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5670                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5671                                       BDINFO_FLAGS_DISABLED);
5672         }
5673
5674         tp->tx_prod = 0;
5675         tp->tx_cons = 0;
5676         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5677         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5678
5679         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5680                        tp->tx_desc_mapping,
5681                        (TG3_TX_RING_SIZE <<
5682                         BDINFO_FLAGS_MAXLEN_SHIFT),
5683                        NIC_SRAM_TX_BUFFER_DESC);
5684
5685         /* There is only one receive return ring on 5705/5750, no need
5686          * to explicitly disable the others.
5687          */
5688         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5689                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5690                      i += TG3_BDINFO_SIZE) {
5691                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5692                                       BDINFO_FLAGS_DISABLED);
5693                 }
5694         }
5695
5696         tp->rx_rcb_ptr = 0;
5697         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5698
5699         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5700                        tp->rx_rcb_mapping,
5701                        (TG3_RX_RCB_RING_SIZE(tp) <<
5702                         BDINFO_FLAGS_MAXLEN_SHIFT),
5703                        0);
5704
5705         tp->rx_std_ptr = tp->rx_pending;
5706         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5707                      tp->rx_std_ptr);
5708
5709         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
5710                                                 tp->rx_jumbo_pending : 0;
5711         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5712                      tp->rx_jumbo_ptr);
5713
5714         /* Initialize MAC address and backoff seed. */
5715         __tg3_set_mac_addr(tp);
5716
5717         /* MTU + ethernet header + FCS + optional VLAN tag */
5718         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
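        /* (ETH_HLEN is the 14-byte Ethernet header; the extra 8 bytes cover
         *  the 4-byte FCS plus a 4-byte 802.1Q VLAN tag.)
         */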
5719
5720         /* The slot time is changed by tg3_setup_phy if we
5721          * run at gigabit with half duplex.
5722          */
5723         tw32(MAC_TX_LENGTHS,
5724              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5725              (6 << TX_LENGTHS_IPG_SHIFT) |
5726              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5727
5728         /* Receive rules. */
5729         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5730         tw32(RCVLPC_CONFIG, 0x0181);
5731
5732         /* Calculate the RDMAC_MODE setting early; we need it to determine
5733          * the RCVLPC_STATS_ENABLE mask.
5734          */
5735         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5736                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5737                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5738                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5739                       RDMAC_MODE_LNGREAD_ENAB);
5740         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5741                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5742
5743         /* This if statement applies only to 5705 and 5750 PCI devices. */
5744         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5745              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5746             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5747                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5748                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5749                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5750                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5751                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5752                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5753                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5754                 }
5755         }
5756
5757         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5758                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5759
5760 #if TG3_TSO_SUPPORT != 0
5761         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5762                 rdmac_mode |= (1 << 27);
5763 #endif
5764
5765         /* Receive/send statistics. */
5766         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5767             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5768                 val = tr32(RCVLPC_STATS_ENABLE);
5769                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5770                 tw32(RCVLPC_STATS_ENABLE, val);
5771         } else {
5772                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5773         }
5774         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5775         tw32(SNDDATAI_STATSENAB, 0xffffff);
5776         tw32(SNDDATAI_STATSCTRL,
5777              (SNDDATAI_SCTRL_ENABLE |
5778               SNDDATAI_SCTRL_FASTUPD));
5779
5780         /* Setup host coalescing engine. */
5781         tw32(HOSTCC_MODE, 0);
5782         for (i = 0; i < 2000; i++) {
5783                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5784                         break;
5785                 udelay(10);
5786         }
5787
5788         __tg3_set_coalesce(tp, &tp->coal);
5789
5790         /* set status block DMA address */
5791         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5792              ((u64) tp->status_mapping >> 32));
5793         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5794              ((u64) tp->status_mapping & 0xffffffff));
5795
5796         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5797                 /* Status/statistics block address.  See tg3_timer,
5798                  * the tg3_periodic_fetch_stats call there, and
5799                  * tg3_get_stats to see how this works for 5705/5750 chips.
5800                  */
5801                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5802                      ((u64) tp->stats_mapping >> 32));
5803                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5804                      ((u64) tp->stats_mapping & 0xffffffff));
5805                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5806                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5807         }
5808
5809         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5810
5811         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5812         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5813         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5814                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5815
5816         /* Clear statistics/status block in chip, and status block in ram. */
5817         for (i = NIC_SRAM_STATS_BLK;
5818              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5819              i += sizeof(u32)) {
5820                 tg3_write_mem(tp, i, 0);
5821                 udelay(40);
5822         }
5823         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5824
5825         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5826                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
5827                 /* Reset to prevent intermittently losing the first RX packet. */
5828                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5829                 udelay(10);
5830         }
5831
5832         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5833                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5834         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5835         udelay(40);
5836
5837         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5838          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5839          * register to preserve the GPIO settings for LOMs. The GPIOs,
5840          * whether used as inputs or outputs, are set by boot code after
5841          * reset.
5842          */
5843         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5844                 u32 gpio_mask;
5845
5846                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5847                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5848
5849                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5850                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5851                                      GRC_LCLCTRL_GPIO_OUTPUT3;
5852
5853                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5854
5855                 /* GPIO1 must be driven high for eeprom write protect */
5856                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5857                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5858         }
5859         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5860         udelay(100);
5861
5862         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5863         tp->last_tag = 0;
5864
5865         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5866                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5867                 udelay(40);
5868         }
5869
5870         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5871                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5872                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5873                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5874                WDMAC_MODE_LNGREAD_ENAB);
5875
5876         /* This if statement applies only to 5705 and 5750 PCI devices. */
5877         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5878              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5879             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5880                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5881                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5882                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5883                         /* nothing */
5884                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5885                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5886                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5887                         val |= WDMAC_MODE_RX_ACCEL;
5888                 }
5889         }
5890
5891         tw32_f(WDMAC_MODE, val);
5892         udelay(40);
5893
5894         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5895                 val = tr32(TG3PCI_X_CAPS);
5896                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5897                         val &= ~PCIX_CAPS_BURST_MASK;
5898                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5899                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5900                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5901                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5902                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5903                                 val |= (tp->split_mode_max_reqs <<
5904                                         PCIX_CAPS_SPLIT_SHIFT);
5905                 }
5906                 tw32(TG3PCI_X_CAPS, val);
5907         }
5908
5909         tw32_f(RDMAC_MODE, rdmac_mode);
5910         udelay(40);
5911
5912         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5913         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5914                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5915         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5916         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5917         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5918         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5919         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5920 #if TG3_TSO_SUPPORT != 0
5921         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5922                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5923 #endif
5924         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5925         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5926
5927         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5928                 err = tg3_load_5701_a0_firmware_fix(tp);
5929                 if (err)
5930                         return err;
5931         }
5932
5933 #if TG3_TSO_SUPPORT != 0
5934         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5935                 err = tg3_load_tso_firmware(tp);
5936                 if (err)
5937                         return err;
5938         }
5939 #endif
5940
5941         tp->tx_mode = TX_MODE_ENABLE;
5942         tw32_f(MAC_TX_MODE, tp->tx_mode);
5943         udelay(100);
5944
5945         tp->rx_mode = RX_MODE_ENABLE;
5946         tw32_f(MAC_RX_MODE, tp->rx_mode);
5947         udelay(10);
5948
5949         if (tp->link_config.phy_is_low_power) {
5950                 tp->link_config.phy_is_low_power = 0;
5951                 tp->link_config.speed = tp->link_config.orig_speed;
5952                 tp->link_config.duplex = tp->link_config.orig_duplex;
5953                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5954         }
5955
5956         tp->mi_mode = MAC_MI_MODE_BASE;
5957         tw32_f(MAC_MI_MODE, tp->mi_mode);
5958         udelay(80);
5959
5960         tw32(MAC_LED_CTRL, tp->led_ctrl);
5961
5962         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5963         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5964                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5965                 udelay(10);
5966         }
5967         tw32_f(MAC_RX_MODE, tp->rx_mode);
5968         udelay(10);
5969
5970         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5971                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5972                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5973                         /* Set the drive transmission level to 1.2V, but only
5974                          * if the signal pre-emphasis bit is not set. */
5975                         val = tr32(MAC_SERDES_CFG);
5976                         val &= 0xfffff000;
5977                         val |= 0x880;
5978                         tw32(MAC_SERDES_CFG, val);
5979                 }
5980                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5981                         tw32(MAC_SERDES_CFG, 0x616000);
5982         }
5983
5984         /* Prevent chip from dropping frames when flow control
5985          * is enabled.
5986          */
5987         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5988
5989         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5990             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5991                 /* Use hardware link auto-negotiation */
5992                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5993         }
5994
5995         err = tg3_setup_phy(tp, 1);
5996         if (err)
5997                 return err;
5998
5999         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6000                 u32 tmp;
6001
6002                 /* Clear CRC stats. */
6003                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6004                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6005                         tg3_readphy(tp, 0x14, &tmp);
6006                 }
6007         }
6008
6009         __tg3_set_rx_mode(tp->dev);
6010
6011         /* Initialize receive rules. */
6012         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6013         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6014         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6015         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6016
6017         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6018             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6019                 limit = 8;
6020         else
6021                 limit = 16;
6022         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6023                 limit -= 4;
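        /* Fall through from the highest unused receive rule slot down to the
         * first one that is kept, clearing each rule/value register pair on
         * the way.  Rules 3 and 2 are deliberately left alone (see the
         * commented-out writes below); rules 1 and 0 were programmed above.
         */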
6024         switch (limit) {
6025         case 16:
6026                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6027         case 15:
6028                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6029         case 14:
6030                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6031         case 13:
6032                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6033         case 12:
6034                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6035         case 11:
6036                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6037         case 10:
6038                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6039         case 9:
6040                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6041         case 8:
6042                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6043         case 7:
6044                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6045         case 6:
6046                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6047         case 5:
6048                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6049         case 4:
6050                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6051         case 3:
6052                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6053         case 2:
6054         case 1:
6055
6056         default:
6057                 break;
6058         }
6059
6060         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6061
6062         return 0;
6063 }
6064
6065 /* Called at device open time to get the chip ready for
6066  * packet processing.  Invoked with tp->lock held.
6067  */
6068 static int tg3_init_hw(struct tg3 *tp)
6069 {
6070         int err;
6071
6072         /* Force the chip into D0. */
6073         err = tg3_set_power_state(tp, 0);
6074         if (err)
6075                 goto out;
6076
6077         tg3_switch_clocks(tp);
6078
6079         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6080
6081         err = tg3_reset_hw(tp);
6082
6083 out:
6084         return err;
6085 }
6086
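/* Accumulate a 32-bit hardware counter register into a 64-bit (high/low)
 * software statistic.  If the low word ends up smaller than the value just
 * added, the 32-bit addition wrapped, so carry into the high word.
 */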
6087 #define TG3_STAT_ADD32(PSTAT, REG) \
6088 do {    u32 __val = tr32(REG); \
6089         (PSTAT)->low += __val; \
6090         if ((PSTAT)->low < __val) \
6091                 (PSTAT)->high += 1; \
6092 } while (0)
6093
6094 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6095 {
6096         struct tg3_hw_stats *sp = tp->hw_stats;
6097
6098         if (!netif_carrier_ok(tp->dev))
6099                 return;
6100
6101         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6102         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6103         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6104         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6105         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6106         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6107         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6108         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6109         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6110         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6111         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6112         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6113         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6114
6115         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6116         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6117         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6118         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6119         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6120         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6121         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6122         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6123         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6124         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6125         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6126         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6127         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6128         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6129 }
6130
6131 static void tg3_timer(unsigned long __opaque)
6132 {
6133         struct tg3 *tp = (struct tg3 *) __opaque;
6134
6135         spin_lock(&tp->lock);
6136
6137         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6138                 /* All of this garbage is needed because, with non-tagged
6139                  * IRQ status, the mailbox/status_block protocol the chip
6140                  * uses with the CPU is race prone.
6141                  */
6142                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6143                         tw32(GRC_LOCAL_CTRL,
6144                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6145                 } else {
6146                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6147                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6148                 }
6149
6150                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6151                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6152                         spin_unlock(&tp->lock);
6153                         schedule_work(&tp->reset_task);
6154                         return;
6155                 }
6156         }
6157
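        /* The timer fires every tp->timer_offset jiffies; timer_counter and
         * timer_multiplier scale that down so the link-state and statistics
         * work below runs roughly once per second, and asf_counter and
         * asf_multiplier do the same for the ASF heartbeat further down.
         */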
6158         /* This part only runs once per second. */
6159         if (!--tp->timer_counter) {
6160                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6161                         tg3_periodic_fetch_stats(tp);
6162
6163                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6164                         u32 mac_stat;
6165                         int phy_event;
6166
6167                         mac_stat = tr32(MAC_STATUS);
6168
6169                         phy_event = 0;
6170                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6171                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6172                                         phy_event = 1;
6173                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6174                                 phy_event = 1;
6175
6176                         if (phy_event)
6177                                 tg3_setup_phy(tp, 0);
6178                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6179                         u32 mac_stat = tr32(MAC_STATUS);
6180                         int need_setup = 0;
6181
6182                         if (netif_carrier_ok(tp->dev) &&
6183                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6184                                 need_setup = 1;
6185                         }
6186                         if (!netif_carrier_ok(tp->dev) &&
6187                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6188                                          MAC_STATUS_SIGNAL_DET))) {
6189                                 need_setup = 1;
6190                         }
6191                         if (need_setup) {
6192                                 tw32_f(MAC_MODE,
6193                                      (tp->mac_mode &
6194                                       ~MAC_MODE_PORT_MODE_MASK));
6195                                 udelay(40);
6196                                 tw32_f(MAC_MODE, tp->mac_mode);
6197                                 udelay(40);
6198                                 tg3_setup_phy(tp, 0);
6199                         }
6200                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6201                         tg3_serdes_parallel_detect(tp);
6202
6203                 tp->timer_counter = tp->timer_multiplier;
6204         }
6205
6206         /* Heartbeat is only sent once every 2 seconds.  */
6207         if (!--tp->asf_counter) {
6208                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6209                         u32 val;
6210
6211                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
6212                                            FWCMD_NICDRV_ALIVE2);
6213                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6214                         /* 5 second timeout */
6215                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6216                         val = tr32(GRC_RX_CPU_EVENT);
6217                         val |= (1 << 14);
6218                         tw32(GRC_RX_CPU_EVENT, val);
6219                 }
6220                 tp->asf_counter = tp->asf_multiplier;
6221         }
6222
6223         spin_unlock(&tp->lock);
6224
6225         tp->timer.expires = jiffies + tp->timer_offset;
6226         add_timer(&tp->timer);
6227 }
6228
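/* Verify that the device can actually deliver an interrupt: temporarily
 * install a minimal test ISR, force an immediate interrupt by writing
 * HOSTCC_MODE_NOW to the coalescing engine, and poll the interrupt
 * mailbox for up to roughly 50 ms.  The regular handler (MSI, tagged or
 * plain INTx) is reinstalled before returning.
 */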
6229 static int tg3_test_interrupt(struct tg3 *tp)
6230 {
6231         struct net_device *dev = tp->dev;
6232         int err, i;
6233         u32 int_mbox = 0;
6234
6235         if (!netif_running(dev))
6236                 return -ENODEV;
6237
6238         tg3_disable_ints(tp);
6239
6240         free_irq(tp->pdev->irq, dev);
6241
6242         err = request_irq(tp->pdev->irq, tg3_test_isr,
6243                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6244         if (err)
6245                 return err;
6246
6247         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6248         tg3_enable_ints(tp);
6249
6250         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6251                HOSTCC_MODE_NOW);
6252
6253         for (i = 0; i < 5; i++) {
6254                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6255                                         TG3_64BIT_REG_LOW);
6256                 if (int_mbox != 0)
6257                         break;
6258                 msleep(10);
6259         }
6260
6261         tg3_disable_ints(tp);
6262
6263         free_irq(tp->pdev->irq, dev);
6264
6265         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6266                 err = request_irq(tp->pdev->irq, tg3_msi,
6267                                   SA_SAMPLE_RANDOM, dev->name, dev);
6268         else {
6269                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6270                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6271                         fn = tg3_interrupt_tagged;
6272                 err = request_irq(tp->pdev->irq, fn,
6273                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6274         }
6275
6276         if (err)
6277                 return err;
6278
6279         if (int_mbox != 0)
6280                 return 0;
6281
6282         return -EIO;
6283 }
6284
6285 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
6286  * successfully restored.
6287  */
6288 static int tg3_test_msi(struct tg3 *tp)
6289 {
6290         struct net_device *dev = tp->dev;
6291         int err;
6292         u16 pci_cmd;
6293
6294         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6295                 return 0;
6296
6297         /* Turn off SERR reporting in case MSI terminates with Master
6298          * Abort.
6299          */
6300         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6301         pci_write_config_word(tp->pdev, PCI_COMMAND,
6302                               pci_cmd & ~PCI_COMMAND_SERR);
6303
6304         err = tg3_test_interrupt(tp);
6305
6306         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6307
6308         if (!err)
6309                 return 0;
6310
6311         /* other failures */
6312         if (err != -EIO)
6313                 return err;
6314
6315         /* MSI test failed, go back to INTx mode */
6316         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6317                "switching to INTx mode. Please report this failure to "
6318                "the PCI maintainer and include system chipset information.\n",
6319                        tp->dev->name);
6320
6321         free_irq(tp->pdev->irq, dev);
6322         pci_disable_msi(tp->pdev);
6323
6324         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6325
6326         {
6327                 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6328                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6329                         fn = tg3_interrupt_tagged;
6330
6331                 err = request_irq(tp->pdev->irq, fn,
6332                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6333         }
6334         if (err)
6335                 return err;
6336
6337         /* Need to reset the chip because the MSI cycle may have terminated
6338          * with Master Abort.
6339          */
6340         tg3_full_lock(tp, 1);
6341
6342         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6343         err = tg3_init_hw(tp);
6344
6345         tg3_full_unlock(tp);
6346
6347         if (err)
6348                 free_irq(tp->pdev->irq, dev);
6349
6350         return err;
6351 }
6352
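     /* Bring the interface up: allocate the DMA-consistent rings, request
      * the IRQ (preferring MSI on 5750-and-newer chips outside the 5750
      * AX/BX revisions when tagged status is available), program the
      * hardware, start the driver timer and enable interrupts.  Any failure
      * along the way unwinds the steps already taken.
      */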
6353 static int tg3_open(struct net_device *dev)
6354 {
6355         struct tg3 *tp = netdev_priv(dev);
6356         int err;
6357
6358         tg3_full_lock(tp, 0);
6359
6360         tg3_disable_ints(tp);
6361         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6362
6363         tg3_full_unlock(tp);
6364
6365         /* The placement of this call is tied
6366          * to the setup and use of Host TX descriptors.
6367          */
6368         err = tg3_alloc_consistent(tp);
6369         if (err)
6370                 return err;
6371
6372         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6373             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6374             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
6375                 /* All MSI supporting chips should support tagged
6376                  * status.  Warn and stay on INTx if not.
6377                  */
6378                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6379                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6380                                "Not using MSI.\n", tp->dev->name);
6381                 } else if (pci_enable_msi(tp->pdev) == 0) {
6382                         u32 msi_mode;
6383
6384                         msi_mode = tr32(MSGINT_MODE);
6385                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6386                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6387                 }
6388         }
6389         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6390                 err = request_irq(tp->pdev->irq, tg3_msi,
6391                                   SA_SAMPLE_RANDOM, dev->name, dev);
6392         else {
6393                 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6394                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6395                         fn = tg3_interrupt_tagged;
6396
6397                 err = request_irq(tp->pdev->irq, fn,
6398                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6399         }
6400
6401         if (err) {
6402                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6403                         pci_disable_msi(tp->pdev);
6404                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6405                 }
6406                 tg3_free_consistent(tp);
6407                 return err;
6408         }
6409
6410         tg3_full_lock(tp, 0);
6411
6412         err = tg3_init_hw(tp);
6413         if (err) {
6414                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6415                 tg3_free_rings(tp);
6416         } else {
6417                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6418                         tp->timer_offset = HZ;
6419                 else
6420                         tp->timer_offset = HZ / 10;
6421
6422                 BUG_ON(tp->timer_offset > HZ);
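                     /* With these multipliers the link-state work in
                      * tg3_timer runs once per second and the ASF
                      * heartbeat fires every two seconds, regardless of
                      * whether the timer itself ticks once or ten times
                      * per second.
                      */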
6423                 tp->timer_counter = tp->timer_multiplier =
6424                         (HZ / tp->timer_offset);
6425                 tp->asf_counter = tp->asf_multiplier =
6426                         ((HZ / tp->timer_offset) * 2);
6427
6428                 init_timer(&tp->timer);
6429                 tp->timer.expires = jiffies + tp->timer_offset;
6430                 tp->timer.data = (unsigned long) tp;
6431                 tp->timer.function = tg3_timer;
6432         }
6433
6434         tg3_full_unlock(tp);
6435
6436         if (err) {
6437                 free_irq(tp->pdev->irq, dev);
6438                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6439                         pci_disable_msi(tp->pdev);
6440                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6441                 }
6442                 tg3_free_consistent(tp);
6443                 return err;
6444         }
6445
6446         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6447                 err = tg3_test_msi(tp);
6448
6449                 if (err) {
6450                         tg3_full_lock(tp, 0);
6451
6452                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6453                                 pci_disable_msi(tp->pdev);
6454                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6455                         }
6456                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6457                         tg3_free_rings(tp);
6458                         tg3_free_consistent(tp);
6459
6460                         tg3_full_unlock(tp);
6461
6462                         return err;
6463                 }
6464         }
6465
6466         tg3_full_lock(tp, 0);
6467
6468         add_timer(&tp->timer);
6469         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6470         tg3_enable_ints(tp);
6471
6472         tg3_full_unlock(tp);
6473
6474         netif_start_queue(dev);
6475
6476         return 0;
6477 }
6478
6479 #if 0
6480 /*static*/ void tg3_dump_state(struct tg3 *tp)
6481 {
6482         u32 val32, val32_2, val32_3, val32_4, val32_5;
6483         u16 val16;
6484         int i;
6485
6486         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6487         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6488         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6489                val16, val32);
6490
6491         /* MAC block */
6492         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6493                tr32(MAC_MODE), tr32(MAC_STATUS));
6494         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6495                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6496         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6497                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6498         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6499                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6500
6501         /* Send data initiator control block */
6502         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6503                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6504         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6505                tr32(SNDDATAI_STATSCTRL));
6506
6507         /* Send data completion control block */
6508         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6509
6510         /* Send BD ring selector block */
6511         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6512                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6513
6514         /* Send BD initiator control block */
6515         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6516                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6517
6518         /* Send BD completion control block */
6519         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6520
6521         /* Receive list placement control block */
6522         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6523                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6524         printk("       RCVLPC_STATSCTRL[%08x]\n",
6525                tr32(RCVLPC_STATSCTRL));
6526
6527         /* Receive data and receive BD initiator control block */
6528         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6529                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6530
6531         /* Receive data completion control block */
6532         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6533                tr32(RCVDCC_MODE));
6534
6535         /* Receive BD initiator control block */
6536         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6537                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6538
6539         /* Receive BD completion control block */
6540         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6541                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6542
6543         /* Receive list selector control block */
6544         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6545                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6546
6547         /* Mbuf cluster free block */
6548         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6549                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6550
6551         /* Host coalescing control block */
6552         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6553                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6554         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6555                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6556                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6557         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6558                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6559                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6560         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6561                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6562         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6563                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6564
6565         /* Memory arbiter control block */
6566         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6567                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6568
6569         /* Buffer manager control block */
6570         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6571                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6572         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6573                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6574         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6575                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6576                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6577                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6578
6579         /* Read DMA control block */
6580         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6581                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6582
6583         /* Write DMA control block */
6584         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6585                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6586
6587         /* DMA completion block */
6588         printk("DEBUG: DMAC_MODE[%08x]\n",
6589                tr32(DMAC_MODE));
6590
6591         /* GRC block */
6592         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6593                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6594         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6595                tr32(GRC_LOCAL_CTRL));
6596
6597         /* TG3_BDINFOs */
6598         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6599                tr32(RCVDBDI_JUMBO_BD + 0x0),
6600                tr32(RCVDBDI_JUMBO_BD + 0x4),
6601                tr32(RCVDBDI_JUMBO_BD + 0x8),
6602                tr32(RCVDBDI_JUMBO_BD + 0xc));
6603         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6604                tr32(RCVDBDI_STD_BD + 0x0),
6605                tr32(RCVDBDI_STD_BD + 0x4),
6606                tr32(RCVDBDI_STD_BD + 0x8),
6607                tr32(RCVDBDI_STD_BD + 0xc));
6608         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6609                tr32(RCVDBDI_MINI_BD + 0x0),
6610                tr32(RCVDBDI_MINI_BD + 0x4),
6611                tr32(RCVDBDI_MINI_BD + 0x8),
6612                tr32(RCVDBDI_MINI_BD + 0xc));
6613
6614         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6615         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6616         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6617         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6618         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6619                val32, val32_2, val32_3, val32_4);
6620
6621         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6622         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6623         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6624         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6625         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6626                val32, val32_2, val32_3, val32_4);
6627
6628         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6629         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6630         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6631         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6632         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6633         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6634                val32, val32_2, val32_3, val32_4, val32_5);
6635
6636         /* SW status block */
6637         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6638                tp->hw_status->status,
6639                tp->hw_status->status_tag,
6640                tp->hw_status->rx_jumbo_consumer,
6641                tp->hw_status->rx_consumer,
6642                tp->hw_status->rx_mini_consumer,
6643                tp->hw_status->idx[0].rx_producer,
6644                tp->hw_status->idx[0].tx_consumer);
6645
6646         /* SW statistics block */
6647         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6648                ((u32 *)tp->hw_stats)[0],
6649                ((u32 *)tp->hw_stats)[1],
6650                ((u32 *)tp->hw_stats)[2],
6651                ((u32 *)tp->hw_stats)[3]);
6652
6653         /* Mailboxes */
6654         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6655                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6656                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6657                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6658                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6659
6660         /* NIC side send descriptors. */
6661         for (i = 0; i < 6; i++) {
6662                 unsigned long txd;
6663
6664                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6665                         + (i * sizeof(struct tg3_tx_buffer_desc));
6666                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6667                        i,
6668                        readl(txd + 0x0), readl(txd + 0x4),
6669                        readl(txd + 0x8), readl(txd + 0xc));
6670         }
6671
6672         /* NIC side RX descriptors. */
6673         for (i = 0; i < 6; i++) {
6674                 unsigned long rxd;
6675
6676                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6677                         + (i * sizeof(struct tg3_rx_buffer_desc));
6678                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6679                        i,
6680                        readl(rxd + 0x0), readl(rxd + 0x4),
6681                        readl(rxd + 0x8), readl(rxd + 0xc));
6682                 rxd += (4 * sizeof(u32));
6683                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6684                        i,
6685                        readl(rxd + 0x0), readl(rxd + 0x4),
6686                        readl(rxd + 0x8), readl(rxd + 0xc));
6687         }
6688
6689         for (i = 0; i < 6; i++) {
6690                 unsigned long rxd;
6691
6692                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6693                         + (i * sizeof(struct tg3_rx_buffer_desc));
6694                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6695                        i,
6696                        readl(rxd + 0x0), readl(rxd + 0x4),
6697                        readl(rxd + 0x8), readl(rxd + 0xc));
6698                 rxd += (4 * sizeof(u32));
6699                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6700                        i,
6701                        readl(rxd + 0x0), readl(rxd + 0x4),
6702                        readl(rxd + 0x8), readl(rxd + 0xc));
6703         }
6704 }
6705 #endif
6706
6707 static struct net_device_stats *tg3_get_stats(struct net_device *);
6708 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6709
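     /* Bring the interface down: stop the queue and timer, halt the chip,
      * free the rings and IRQ, and snapshot the hardware counters so that
      * the statistics survive the ifdown/ifup cycle.
      */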
6710 static int tg3_close(struct net_device *dev)
6711 {
6712         struct tg3 *tp = netdev_priv(dev);
6713
6714         netif_stop_queue(dev);
6715
6716         del_timer_sync(&tp->timer);
6717
6718         tg3_full_lock(tp, 1);
6719 #if 0
6720         tg3_dump_state(tp);
6721 #endif
6722
6723         tg3_disable_ints(tp);
6724
6725         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6726         tg3_free_rings(tp);
6727         tp->tg3_flags &=
6728                 ~(TG3_FLAG_INIT_COMPLETE |
6729                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6730         netif_carrier_off(tp->dev);
6731
6732         tg3_full_unlock(tp);
6733
6734         free_irq(tp->pdev->irq, dev);
6735         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6736                 pci_disable_msi(tp->pdev);
6737                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6738         }
6739
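             /* The hardware statistics block is about to be freed, so fold
              * the current counters into the *_prev snapshots that
              * tg3_get_stats() and tg3_get_estats() add on top of.
              */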
6740         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6741                sizeof(tp->net_stats_prev));
6742         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6743                sizeof(tp->estats_prev));
6744
6745         tg3_free_consistent(tp);
6746
6747         return 0;
6748 }
6749
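     /* Read one 64-bit hardware counter into an unsigned long.  On 32-bit
      * hosts only the low 32 bits are returned, since unsigned long cannot
      * hold the full value.
      */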
6750 static inline unsigned long get_stat64(tg3_stat64_t *val)
6751 {
6752         unsigned long ret;
6753
6754 #if (BITS_PER_LONG == 32)
6755         ret = val->low;
6756 #else
6757         ret = ((u64)val->high << 32) | ((u64)val->low);
6758 #endif
6759         return ret;
6760 }
6761
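     /* On 5700/5701 copper parts the CRC error count is read from the PHY
      * (register 0x1e is poked first, presumably to expose a vendor shadow
      * page, then register 0x14 is read) and accumulated in software.  All
      * other configurations use the MAC's rx_fcs_errors counter directly.
      */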
6762 static unsigned long calc_crc_errors(struct tg3 *tp)
6763 {
6764         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6765
6766         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6767             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6768              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6769                 u32 val;
6770
6771                 spin_lock_bh(&tp->lock);
6772                 if (!tg3_readphy(tp, 0x1e, &val)) {
6773                         tg3_writephy(tp, 0x1e, val | 0x8000);
6774                         tg3_readphy(tp, 0x14, &val);
6775                 } else
6776                         val = 0;
6777                 spin_unlock_bh(&tp->lock);
6778
6779                 tp->phy_crc_errors += val;
6780
6781                 return tp->phy_crc_errors;
6782         }
6783
6784         return get_stat64(&hw_stats->rx_fcs_errors);
6785 }
6786
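     /* Each ethtool statistic is the counter snapshotted at the last close
      * (estats_prev) plus whatever the hardware has accumulated since.
      */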
6787 #define ESTAT_ADD(member) \
6788         estats->member =        old_estats->member + \
6789                                 get_stat64(&hw_stats->member)
6790
6791 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6792 {
6793         struct tg3_ethtool_stats *estats = &tp->estats;
6794         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6795         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6796
6797         if (!hw_stats)
6798                 return old_estats;
6799
6800         ESTAT_ADD(rx_octets);
6801         ESTAT_ADD(rx_fragments);
6802         ESTAT_ADD(rx_ucast_packets);
6803         ESTAT_ADD(rx_mcast_packets);
6804         ESTAT_ADD(rx_bcast_packets);
6805         ESTAT_ADD(rx_fcs_errors);
6806         ESTAT_ADD(rx_align_errors);
6807         ESTAT_ADD(rx_xon_pause_rcvd);
6808         ESTAT_ADD(rx_xoff_pause_rcvd);
6809         ESTAT_ADD(rx_mac_ctrl_rcvd);
6810         ESTAT_ADD(rx_xoff_entered);
6811         ESTAT_ADD(rx_frame_too_long_errors);
6812         ESTAT_ADD(rx_jabbers);
6813         ESTAT_ADD(rx_undersize_packets);
6814         ESTAT_ADD(rx_in_length_errors);
6815         ESTAT_ADD(rx_out_length_errors);
6816         ESTAT_ADD(rx_64_or_less_octet_packets);
6817         ESTAT_ADD(rx_65_to_127_octet_packets);
6818         ESTAT_ADD(rx_128_to_255_octet_packets);
6819         ESTAT_ADD(rx_256_to_511_octet_packets);
6820         ESTAT_ADD(rx_512_to_1023_octet_packets);
6821         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6822         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6823         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6824         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6825         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6826
6827         ESTAT_ADD(tx_octets);
6828         ESTAT_ADD(tx_collisions);
6829         ESTAT_ADD(tx_xon_sent);
6830         ESTAT_ADD(tx_xoff_sent);
6831         ESTAT_ADD(tx_flow_control);
6832         ESTAT_ADD(tx_mac_errors);
6833         ESTAT_ADD(tx_single_collisions);
6834         ESTAT_ADD(tx_mult_collisions);
6835         ESTAT_ADD(tx_deferred);
6836         ESTAT_ADD(tx_excessive_collisions);
6837         ESTAT_ADD(tx_late_collisions);
6838         ESTAT_ADD(tx_collide_2times);
6839         ESTAT_ADD(tx_collide_3times);
6840         ESTAT_ADD(tx_collide_4times);
6841         ESTAT_ADD(tx_collide_5times);
6842         ESTAT_ADD(tx_collide_6times);
6843         ESTAT_ADD(tx_collide_7times);
6844         ESTAT_ADD(tx_collide_8times);
6845         ESTAT_ADD(tx_collide_9times);
6846         ESTAT_ADD(tx_collide_10times);
6847         ESTAT_ADD(tx_collide_11times);
6848         ESTAT_ADD(tx_collide_12times);
6849         ESTAT_ADD(tx_collide_13times);
6850         ESTAT_ADD(tx_collide_14times);
6851         ESTAT_ADD(tx_collide_15times);
6852         ESTAT_ADD(tx_ucast_packets);
6853         ESTAT_ADD(tx_mcast_packets);
6854         ESTAT_ADD(tx_bcast_packets);
6855         ESTAT_ADD(tx_carrier_sense_errors);
6856         ESTAT_ADD(tx_discards);
6857         ESTAT_ADD(tx_errors);
6858
6859         ESTAT_ADD(dma_writeq_full);
6860         ESTAT_ADD(dma_write_prioq_full);
6861         ESTAT_ADD(rxbds_empty);
6862         ESTAT_ADD(rx_discards);
6863         ESTAT_ADD(rx_errors);
6864         ESTAT_ADD(rx_threshold_hit);
6865
6866         ESTAT_ADD(dma_readq_full);
6867         ESTAT_ADD(dma_read_prioq_full);
6868         ESTAT_ADD(tx_comp_queue_full);
6869
6870         ESTAT_ADD(ring_set_send_prod_index);
6871         ESTAT_ADD(ring_status_update);
6872         ESTAT_ADD(nic_irqs);
6873         ESTAT_ADD(nic_avoided_irqs);
6874         ESTAT_ADD(nic_tx_threshold_hit);
6875
6876         return estats;
6877 }
6878
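     /* Fill in struct net_device_stats from the hardware statistics block,
      * again adding on top of the snapshot taken when the device was last
      * closed.
      */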
6879 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6880 {
6881         struct tg3 *tp = netdev_priv(dev);
6882         struct net_device_stats *stats = &tp->net_stats;
6883         struct net_device_stats *old_stats = &tp->net_stats_prev;
6884         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6885
6886         if (!hw_stats)
6887                 return old_stats;
6888
6889         stats->rx_packets = old_stats->rx_packets +
6890                 get_stat64(&hw_stats->rx_ucast_packets) +
6891                 get_stat64(&hw_stats->rx_mcast_packets) +
6892                 get_stat64(&hw_stats->rx_bcast_packets);
6893                 
6894         stats->tx_packets = old_stats->tx_packets +
6895                 get_stat64(&hw_stats->tx_ucast_packets) +
6896                 get_stat64(&hw_stats->tx_mcast_packets) +
6897                 get_stat64(&hw_stats->tx_bcast_packets);
6898
6899         stats->rx_bytes = old_stats->rx_bytes +
6900                 get_stat64(&hw_stats->rx_octets);
6901         stats->tx_bytes = old_stats->tx_bytes +
6902                 get_stat64(&hw_stats->tx_octets);
6903
6904         stats->rx_errors = old_stats->rx_errors +
6905                 get_stat64(&hw_stats->rx_errors);
6906         stats->tx_errors = old_stats->tx_errors +
6907                 get_stat64(&hw_stats->tx_errors) +
6908                 get_stat64(&hw_stats->tx_mac_errors) +
6909                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6910                 get_stat64(&hw_stats->tx_discards);
6911
6912         stats->multicast = old_stats->multicast +
6913                 get_stat64(&hw_stats->rx_mcast_packets);
6914         stats->collisions = old_stats->collisions +
6915                 get_stat64(&hw_stats->tx_collisions);
6916
6917         stats->rx_length_errors = old_stats->rx_length_errors +
6918                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6919                 get_stat64(&hw_stats->rx_undersize_packets);
6920
6921         stats->rx_over_errors = old_stats->rx_over_errors +
6922                 get_stat64(&hw_stats->rxbds_empty);
6923         stats->rx_frame_errors = old_stats->rx_frame_errors +
6924                 get_stat64(&hw_stats->rx_align_errors);
6925         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6926                 get_stat64(&hw_stats->tx_discards);
6927         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6928                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6929
6930         stats->rx_crc_errors = old_stats->rx_crc_errors +
6931                 calc_crc_errors(tp);
6932
6933         stats->rx_missed_errors = old_stats->rx_missed_errors +
6934                 get_stat64(&hw_stats->rx_discards);
6935
6936         return stats;
6937 }
6938
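     /* Bit-reflected CRC-32 (polynomial 0xedb88320), the same CRC used for
      * the Ethernet FCS; the multicast hash below is derived from its low
      * bits.
      */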
6939 static inline u32 calc_crc(unsigned char *buf, int len)
6940 {
6941         u32 reg;
6942         u32 tmp;
6943         int j, k;
6944
6945         reg = 0xffffffff;
6946
6947         for (j = 0; j < len; j++) {
6948                 reg ^= buf[j];
6949
6950                 for (k = 0; k < 8; k++) {
6951                         tmp = reg & 0x01;
6952
6953                         reg >>= 1;
6954
6955                         if (tmp) {
6956                                 reg ^= 0xedb88320;
6957                         }
6958                 }
6959         }
6960
6961         return ~reg;
6962 }
6963
6964 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6965 {
6966         /* accept or reject all multicast frames */
6967         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6968         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6969         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6970         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6971 }
6972
6973 static void __tg3_set_rx_mode(struct net_device *dev)
6974 {
6975         struct tg3 *tp = netdev_priv(dev);
6976         u32 rx_mode;
6977
6978         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6979                                   RX_MODE_KEEP_VLAN_TAG);
6980
6981         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6982          * flag clear.
6983          */
6984 #if TG3_VLAN_TAG_USED
6985         if (!tp->vlgrp &&
6986             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6987                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6988 #else
6989         /* By definition, VLAN is always disabled in this
6990          * case.
6991          */
6992         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6993                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6994 #endif
6995
6996         if (dev->flags & IFF_PROMISC) {
6997                 /* Promiscuous mode. */
6998                 rx_mode |= RX_MODE_PROMISC;
6999         } else if (dev->flags & IFF_ALLMULTI) {
7000                 /* Accept all multicast. */
7001                 tg3_set_multi (tp, 1);
7002         } else if (dev->mc_count < 1) {
7003                 /* Reject all multicast. */
7004                 tg3_set_multi (tp, 0);
7005         } else {
7006                 /* Accept one or more multicast(s). */
7007                 struct dev_mc_list *mclist;
7008                 unsigned int i;
7009                 u32 mc_filter[4] = { 0, };
7010                 u32 regidx;
7011                 u32 bit;
7012                 u32 crc;
7013
7014                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7015                      i++, mclist = mclist->next) {
7016
7017                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7018                         bit = ~crc & 0x7f;
7019                         regidx = (bit & 0x60) >> 5;
7020                         bit &= 0x1f;
7021                         mc_filter[regidx] |= (1 << bit);
7022                 }
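                     /* Each multicast address selects one bit in the
                      * 128-bit hash: e.g. a CRC of 0xffffff80 gives
                      * ~crc & 0x7f = 0x7f, so regidx = 3 and bit 31 of
                      * MAC_HASH_REG_3 is set.
                      */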
7023
7024                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7025                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7026                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7027                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7028         }
7029
7030         if (rx_mode != tp->rx_mode) {
7031                 tp->rx_mode = rx_mode;
7032                 tw32_f(MAC_RX_MODE, rx_mode);
7033                 udelay(10);
7034         }
7035 }
7036
7037 static void tg3_set_rx_mode(struct net_device *dev)
7038 {
7039         struct tg3 *tp = netdev_priv(dev);
7040
7041         tg3_full_lock(tp, 0);
7042         __tg3_set_rx_mode(dev);
7043         tg3_full_unlock(tp);
7044 }
7045
7046 #define TG3_REGDUMP_LEN         (32 * 1024)
7047
7048 static int tg3_get_regs_len(struct net_device *dev)
7049 {
7050         return TG3_REGDUMP_LEN;
7051 }
7052
7053 static void tg3_get_regs(struct net_device *dev,
7054                 struct ethtool_regs *regs, void *_p)
7055 {
7056         u32 *p = _p;
7057         struct tg3 *tp = netdev_priv(dev);
7058         u8 *orig_p = _p;
7059         int i;
7060
7061         regs->version = 0;
7062
7063         memset(p, 0, TG3_REGDUMP_LEN);
7064
7065         tg3_full_lock(tp, 0);
7066
7067 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7068 #define GET_REG32_LOOP(base,len)                \
7069 do {    p = (u32 *)(orig_p + (base));           \
7070         for (i = 0; i < len; i += 4)            \
7071                 __GET_REG32((base) + i);        \
7072 } while (0)
7073 #define GET_REG32_1(reg)                        \
7074 do {    p = (u32 *)(orig_p + (reg));            \
7075         __GET_REG32((reg));                     \
7076 } while (0)
7077
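             /* Each block is copied into the dump buffer at the same offset
              * it occupies in the register space, so unread gaps stay
              * zero-filled from the memset() above.
              */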
7078         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7079         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7080         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7081         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7082         GET_REG32_1(SNDDATAC_MODE);
7083         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7084         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7085         GET_REG32_1(SNDBDC_MODE);
7086         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7087         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7088         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7089         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7090         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7091         GET_REG32_1(RCVDCC_MODE);
7092         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7093         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7094         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7095         GET_REG32_1(MBFREE_MODE);
7096         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7097         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7098         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7099         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7100         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7101         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
7102         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
7103         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7104         GET_REG32_LOOP(FTQ_RESET, 0x120);
7105         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7106         GET_REG32_1(DMAC_MODE);
7107         GET_REG32_LOOP(GRC_MODE, 0x4c);
7108         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7109                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7110
7111 #undef __GET_REG32
7112 #undef GET_REG32_LOOP
7113 #undef GET_REG32_1
7114
7115         tg3_full_unlock(tp);
7116 }
7117
7118 static int tg3_get_eeprom_len(struct net_device *dev)
7119 {
7120         struct tg3 *tp = netdev_priv(dev);
7121
7122         return tp->nvram_size;
7123 }
7124
7125 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7126
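     /* ethtool EEPROM read.  NVRAM is accessed in 32-bit words, so the
      * request is handled in three parts: a leading partial word, the
      * aligned middle, and a trailing partial word.
      */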
7127 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7128 {
7129         struct tg3 *tp = netdev_priv(dev);
7130         int ret;
7131         u8  *pd;
7132         u32 i, offset, len, val, b_offset, b_count;
7133
7134         offset = eeprom->offset;
7135         len = eeprom->len;
7136         eeprom->len = 0;
7137
7138         eeprom->magic = TG3_EEPROM_MAGIC;
7139
7140         if (offset & 3) {
7141                 /* adjustments to start on required 4 byte boundary */
7142                 b_offset = offset & 3;
7143                 b_count = 4 - b_offset;
7144                 if (b_count > len) {
7145                         /* i.e. offset=1 len=2 */
7146                         b_count = len;
7147                 }
7148                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7149                 if (ret)
7150                         return ret;
7151                 val = cpu_to_le32(val);
7152                 memcpy(data, ((char*)&val) + b_offset, b_count);
7153                 len -= b_count;
7154                 offset += b_count;
7155                 eeprom->len += b_count;
7156         }
7157
7158         /* read bytes up to the last 4 byte boundary */
7159         pd = &data[eeprom->len];
7160         for (i = 0; i < (len - (len & 3)); i += 4) {
7161                 ret = tg3_nvram_read(tp, offset + i, &val);
7162                 if (ret) {
7163                         eeprom->len += i;
7164                         return ret;
7165                 }
7166                 val = cpu_to_le32(val);
7167                 memcpy(pd + i, &val, 4);
7168         }
7169         eeprom->len += i;
7170
7171         if (len & 3) {
7172                 /* read last bytes not ending on 4 byte boundary */
7173                 pd = &data[eeprom->len];
7174                 b_count = len & 3;
7175                 b_offset = offset + len - b_count;
7176                 ret = tg3_nvram_read(tp, b_offset, &val);
7177                 if (ret)
7178                         return ret;
7179                 val = cpu_to_le32(val);
7180                 memcpy(pd, ((char*)&val), b_count);
7181                 eeprom->len += b_count;
7182         }
7183         return 0;
7184 }
7185
7186 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7187
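     /* ethtool EEPROM write.  Unaligned starts/ends are handled by reading
      * the neighbouring NVRAM words first and merging them into a bounce
      * buffer, so whole 32-bit words are always written back.
      */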
7188 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7189 {
7190         struct tg3 *tp = netdev_priv(dev);
7191         int ret;
7192         u32 offset, len, b_offset, odd_len, start, end;
7193         u8 *buf;
7194
7195         if (eeprom->magic != TG3_EEPROM_MAGIC)
7196                 return -EINVAL;
7197
7198         offset = eeprom->offset;
7199         len = eeprom->len;
7200
7201         if ((b_offset = (offset & 3))) {
7202                 /* adjustments to start on required 4 byte boundary */
7203                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7204                 if (ret)
7205                         return ret;
7206                 start = cpu_to_le32(start);
7207                 len += b_offset;
7208                 offset &= ~3;
7209                 if (len < 4)
7210                         len = 4;
7211         }
7212
7213         odd_len = 0;
7214         if (len & 3) {
7215                 /* adjustments to end on required 4 byte boundary */
7216                 odd_len = 1;
7217                 len = (len + 3) & ~3;
7218                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7219                 if (ret)
7220                         return ret;
7221                 end = cpu_to_le32(end);
7222         }
7223
7224         buf = data;
7225         if (b_offset || odd_len) {
7226                 buf = kmalloc(len, GFP_KERNEL);
7227                 if (buf == NULL)
7228                         return -ENOMEM;
7229                 if (b_offset)
7230                         memcpy(buf, &start, 4);
7231                 if (odd_len)
7232                         memcpy(buf+len-4, &end, 4);
7233                 memcpy(buf + b_offset, data, eeprom->len);
7234         }
7235
7236         ret = tg3_nvram_write_block(tp, offset, len, buf);
7237
7238         if (buf != data)
7239                 kfree(buf);
7240
7241         return ret;
7242 }
7243
7244 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7245 {
7246         struct tg3 *tp = netdev_priv(dev);
7247   
7248         cmd->supported = (SUPPORTED_Autoneg);
7249
7250         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7251                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7252                                    SUPPORTED_1000baseT_Full);
7253
7254         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
7255                 cmd->supported |= (SUPPORTED_100baseT_Half |
7256                                   SUPPORTED_100baseT_Full |
7257                                   SUPPORTED_10baseT_Half |
7258                                   SUPPORTED_10baseT_Full |
7259                                   SUPPORTED_MII);
7260         else
7261                 cmd->supported |= SUPPORTED_FIBRE;
7262   
7263         cmd->advertising = tp->link_config.advertising;
7264         if (netif_running(dev)) {
7265                 cmd->speed = tp->link_config.active_speed;
7266                 cmd->duplex = tp->link_config.active_duplex;
7267         }
7268         cmd->port = 0;
7269         cmd->phy_address = PHY_ADDR;
7270         cmd->transceiver = 0;
7271         cmd->autoneg = tp->link_config.autoneg;
7272         cmd->maxtxpkt = 0;
7273         cmd->maxrxpkt = 0;
7274         return 0;
7275 }
7276   
7277 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7278 {
7279         struct tg3 *tp = netdev_priv(dev);
7280   
7281         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7282                 /* These are the only advertisement bits allowed.  */
7283                 if (cmd->autoneg == AUTONEG_ENABLE &&
7284                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7285                                           ADVERTISED_1000baseT_Full |
7286                                           ADVERTISED_Autoneg |
7287                                           ADVERTISED_FIBRE)))
7288                         return -EINVAL;
7289                 /* Fiber can only do SPEED_1000.  */
7290                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7291                          (cmd->speed != SPEED_1000))
7292                         return -EINVAL;
7293         /* Copper cannot force SPEED_1000.  */
7294         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7295                    (cmd->speed == SPEED_1000))
7296                 return -EINVAL;
7297         else if ((cmd->speed == SPEED_1000) &&
7298                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7299                 return -EINVAL;
7300
7301         tg3_full_lock(tp, 0);
7302
7303         tp->link_config.autoneg = cmd->autoneg;
7304         if (cmd->autoneg == AUTONEG_ENABLE) {
7305                 tp->link_config.advertising = cmd->advertising;
7306                 tp->link_config.speed = SPEED_INVALID;
7307                 tp->link_config.duplex = DUPLEX_INVALID;
7308         } else {
7309                 tp->link_config.advertising = 0;
7310                 tp->link_config.speed = cmd->speed;
7311                 tp->link_config.duplex = cmd->duplex;
7312         }
7313   
7314         if (netif_running(dev))
7315                 tg3_setup_phy(tp, 1);
7316
7317         tg3_full_unlock(tp);
7318   
7319         return 0;
7320 }
7321   
7322 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7323 {
7324         struct tg3 *tp = netdev_priv(dev);
7325   
7326         strcpy(info->driver, DRV_MODULE_NAME);
7327         strcpy(info->version, DRV_MODULE_VERSION);
7328         strcpy(info->bus_info, pci_name(tp->pdev));
7329 }
7330   
7331 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7332 {
7333         struct tg3 *tp = netdev_priv(dev);
7334   
7335         wol->supported = WAKE_MAGIC;
7336         wol->wolopts = 0;
7337         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7338                 wol->wolopts = WAKE_MAGIC;
7339         memset(&wol->sopass, 0, sizeof(wol->sopass));
7340 }
7341   
7342 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7343 {
7344         struct tg3 *tp = netdev_priv(dev);
7345   
7346         if (wol->wolopts & ~WAKE_MAGIC)
7347                 return -EINVAL;
7348         if ((wol->wolopts & WAKE_MAGIC) &&
7349             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7350             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7351                 return -EINVAL;
7352   
7353         spin_lock_bh(&tp->lock);
7354         if (wol->wolopts & WAKE_MAGIC)
7355                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7356         else
7357                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7358         spin_unlock_bh(&tp->lock);
7359   
7360         return 0;
7361 }
7362   
7363 static u32 tg3_get_msglevel(struct net_device *dev)
7364 {
7365         struct tg3 *tp = netdev_priv(dev);
7366         return tp->msg_enable;
7367 }
7368   
7369 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7370 {
7371         struct tg3 *tp = netdev_priv(dev);
7372         tp->msg_enable = value;
7373 }
7374   
7375 #if TG3_TSO_SUPPORT != 0
7376 static int tg3_set_tso(struct net_device *dev, u32 value)
7377 {
7378         struct tg3 *tp = netdev_priv(dev);
7379
7380         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7381                 if (value)
7382                         return -EINVAL;
7383                 return 0;
7384         }
7385         return ethtool_op_set_tso(dev, value);
7386 }
7387 #endif
7388   
7389 static int tg3_nway_reset(struct net_device *dev)
7390 {
7391         struct tg3 *tp = netdev_priv(dev);
7392         u32 bmcr;
7393         int r;
7394   
7395         if (!netif_running(dev))
7396                 return -EAGAIN;
7397
7398         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7399                 return -EINVAL;
7400
7401         spin_lock_bh(&tp->lock);
7402         r = -EINVAL;
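             /* The first BMCR read is discarded; presumably a dummy read to
              * make sure the second one returns fresh data.
              */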
7403         tg3_readphy(tp, MII_BMCR, &bmcr);
7404         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7405             ((bmcr & BMCR_ANENABLE) ||
7406              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7407                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7408                                            BMCR_ANENABLE);
7409                 r = 0;
7410         }
7411         spin_unlock_bh(&tp->lock);
7412   
7413         return r;
7414 }
7415   
7416 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7417 {
7418         struct tg3 *tp = netdev_priv(dev);
7419   
7420         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7421         ering->rx_mini_max_pending = 0;
7422         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7423
7424         ering->rx_pending = tp->rx_pending;
7425         ering->rx_mini_pending = 0;
7426         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7427         ering->tx_pending = tp->tx_pending;
7428 }
7429   
7430 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7431 {
7432         struct tg3 *tp = netdev_priv(dev);
7433         int irq_sync = 0;
7434   
7435         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7436             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7437             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7438                 return -EINVAL;
7439   
7440         if (netif_running(dev)) {
7441                 tg3_netif_stop(tp);
7442                 irq_sync = 1;
7443         }
7444
7445         tg3_full_lock(tp, irq_sync);
7446   
7447         tp->rx_pending = ering->rx_pending;
7448
7449         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7450             tp->rx_pending > 63)
7451                 tp->rx_pending = 63;
7452         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7453         tp->tx_pending = ering->tx_pending;
7454
7455         if (netif_running(dev)) {
7456                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7457                 tg3_init_hw(tp);
7458                 tg3_netif_start(tp);
7459         }
7460
7461         tg3_full_unlock(tp);
7462   
7463         return 0;
7464 }
7465   
7466 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7467 {
7468         struct tg3 *tp = netdev_priv(dev);
7469   
7470         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7471         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7472         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7473 }
7474   
7475 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7476 {
7477         struct tg3 *tp = netdev_priv(dev);
7478         int irq_sync = 0;
7479   
7480         if (netif_running(dev)) {
7481                 tg3_netif_stop(tp);
7482                 irq_sync = 1;
7483         }
7484
7485         tg3_full_lock(tp, irq_sync);
7486
7487         if (epause->autoneg)
7488                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7489         else
7490                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7491         if (epause->rx_pause)
7492                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7493         else
7494                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7495         if (epause->tx_pause)
7496                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7497         else
7498                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7499
7500         if (netif_running(dev)) {
7501                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7502                 tg3_init_hw(tp);
7503                 tg3_netif_start(tp);
7504         }
7505
7506         tg3_full_unlock(tp);
7507   
7508         return 0;
7509 }
7510   
7511 static u32 tg3_get_rx_csum(struct net_device *dev)
7512 {
7513         struct tg3 *tp = netdev_priv(dev);
7514         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7515 }
7516   
7517 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7518 {
7519         struct tg3 *tp = netdev_priv(dev);
7520   
7521         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7522                 if (data != 0)
7523                         return -EINVAL;
7524                 return 0;
7525         }
7526   
7527         spin_lock_bh(&tp->lock);
7528         if (data)
7529                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7530         else
7531                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7532         spin_unlock_bh(&tp->lock);
7533   
7534         return 0;
7535 }
7536   
7537 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7538 {
7539         struct tg3 *tp = netdev_priv(dev);
7540   
7541         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7542                 if (data != 0)
7543                         return -EINVAL;
7544                 return 0;
7545         }
7546   
7547         if (data)
7548                 dev->features |= NETIF_F_IP_CSUM;
7549         else
7550                 dev->features &= ~NETIF_F_IP_CSUM;
7551
7552         return 0;
7553 }
7554
7555 static int tg3_get_stats_count (struct net_device *dev)
7556 {
7557         return TG3_NUM_STATS;
7558 }
7559
7560 static int tg3_get_test_count (struct net_device *dev)
7561 {
7562         return TG3_NUM_TEST;
7563 }
7564
7565 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7566 {
7567         switch (stringset) {
7568         case ETH_SS_STATS:
7569                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7570                 break;
7571         case ETH_SS_TEST:
7572                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7573                 break;
7574         default:
7575                 WARN_ON(1);     /* we need a WARN() */
7576                 break;
7577         }
7578 }
7579
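     /* ethtool LED identify.  'data' is the number of seconds to blink
      * (0 means 2): each second the speed and traffic LEDs are forced on
      * for 500ms and then off for 500ms, after which normal LED control
      * is restored.
      */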
7580 static int tg3_phys_id(struct net_device *dev, u32 data)
7581 {
7582         struct tg3 *tp = netdev_priv(dev);
7583         int i;
7584
7585         if (!netif_running(tp->dev))
7586                 return -EAGAIN;
7587
7588         if (data == 0)
7589                 data = 2;
7590
7591         for (i = 0; i < (data * 2); i++) {
7592                 if ((i % 2) == 0)
7593                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7594                                            LED_CTRL_1000MBPS_ON |
7595                                            LED_CTRL_100MBPS_ON |
7596                                            LED_CTRL_10MBPS_ON |
7597                                            LED_CTRL_TRAFFIC_OVERRIDE |
7598                                            LED_CTRL_TRAFFIC_BLINK |
7599                                            LED_CTRL_TRAFFIC_LED);
7600         
7601                 else
7602                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7603                                            LED_CTRL_TRAFFIC_OVERRIDE);
7604
7605                 if (msleep_interruptible(500))
7606                         break;
7607         }
7608         tw32(MAC_LED_CTRL, tp->led_ctrl);
7609         return 0;
7610 }
7611
7612 static void tg3_get_ethtool_stats (struct net_device *dev,
7613                                    struct ethtool_stats *estats, u64 *tmp_stats)
7614 {
7615         struct tg3 *tp = netdev_priv(dev);
7616         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7617 }
7618
7619 #define NVRAM_TEST_SIZE 0x100
7620
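     /* ethtool offline NVRAM test: read the first 256 bytes, check the
      * magic word, then verify the CRC32 of the bootstrap header (offsets
      * 0x00-0x0f, checksum at 0x10) and of the manufacturing block
      * (offset 0x74, 0x88 bytes, checksum at 0xfc).
      */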
7621 static int tg3_test_nvram(struct tg3 *tp)
7622 {
7623         u32 *buf, csum;
7624         int i, j, err = 0;
7625
7626         buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7627         if (buf == NULL)
7628                 return -ENOMEM;
7629
7630         for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7631                 u32 val;
7632
7633                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7634                         break;
7635                 buf[j] = cpu_to_le32(val);
7636         }
7637         if (i < NVRAM_TEST_SIZE)
7638                 goto out;
7639
7640         err = -EIO;
7641         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7642                 goto out;
7643
7644         /* Bootstrap checksum at offset 0x10 */
7645         csum = calc_crc((unsigned char *) buf, 0x10);
7646         if (csum != cpu_to_le32(buf[0x10/4]))
7647                 goto out;
7648
7649         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7650         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7651         if (csum != cpu_to_le32(buf[0xfc/4]))
7652                 goto out;
7653
7654         err = 0;
7655
7656 out:
7657         kfree(buf);
7658         return err;
7659 }
7660
7661 #define TG3_SERDES_TIMEOUT_SEC  2
7662 #define TG3_COPPER_TIMEOUT_SEC  6
7663
7664 static int tg3_test_link(struct tg3 *tp)
7665 {
7666         int i, max;
7667
7668         if (!netif_running(tp->dev))
7669                 return -ENODEV;
7670
7671         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
7672                 max = TG3_SERDES_TIMEOUT_SEC;
7673         else
7674                 max = TG3_COPPER_TIMEOUT_SEC;
7675
7676         for (i = 0; i < max; i++) {
7677                 if (netif_carrier_ok(tp->dev))
7678                         return 0;
7679
7680                 if (msleep_interruptible(1000))
7681                         break;
7682         }
7683
7684         return -EIO;
7685 }
7686
7687 /* Only test the commonly used registers */
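     /* Each reg_tbl entry gives a register offset, flags restricting it to
      * 5705-class, non-5705 or non-5788 chips, and two masks: read_mask
      * presumably covers bits with a fixed expected read value, write_mask
      * the bits the register test is allowed to toggle.
      */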
7688 static int tg3_test_registers(struct tg3 *tp)
7689 {
7690         int i, is_5705;
7691         u32 offset, read_mask, write_mask, val, save_val, read_val;
7692         static struct {
7693                 u16 offset;
7694                 u16 flags;
7695 #define TG3_FL_5705     0x1
7696 #define TG3_FL_NOT_5705 0x2
7697 #define TG3_FL_NOT_5788 0x4
7698                 u32 read_mask;
7699                 u32 write_mask;
7700         } reg_tbl[] = {
7701                 /* MAC Control Registers */
7702                 { MAC_MODE, TG3_FL_NOT_5705,
7703                         0x00000000, 0x00ef6f8c },
7704                 { MAC_MODE, TG3_FL_5705,
7705                         0x00000000, 0x01ef6b8c },
7706                 { MAC_STATUS, TG3_FL_NOT_5705,
7707                         0x03800107, 0x00000000 },
7708                 { MAC_STATUS, TG3_FL_5705,
7709                         0x03800100, 0x00000000 },
7710                 { MAC_ADDR_0_HIGH, 0x0000,
7711                         0x00000000, 0x0000ffff },
7712                 { MAC_ADDR_0_LOW, 0x0000,
7713                         0x00000000, 0xffffffff },
7714                 { MAC_RX_MTU_SIZE, 0x0000,
7715                         0x00000000, 0x0000ffff },
7716                 { MAC_TX_MODE, 0x0000,
7717                         0x00000000, 0x00000070 },
7718                 { MAC_TX_LENGTHS, 0x0000,
7719                         0x00000000, 0x00003fff },
7720                 { MAC_RX_MODE, TG3_FL_NOT_5705,
7721                         0x00000000, 0x000007fc },
7722                 { MAC_RX_MODE, TG3_FL_5705,
7723                         0x00000000, 0x000007dc },
7724                 { MAC_HASH_REG_0, 0x0000,
7725                         0x00000000, 0xffffffff },
7726                 { MAC_HASH_REG_1, 0x0000,
7727                         0x00000000, 0xffffffff },
7728                 { MAC_HASH_REG_2, 0x0000,
7729                         0x00000000, 0xffffffff },
7730                 { MAC_HASH_REG_3, 0x0000,
7731                         0x00000000, 0xffffffff },
7732
7733                 /* Receive Data and Receive BD Initiator Control Registers. */
7734                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7735                         0x00000000, 0xffffffff },
7736                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7737                         0x00000000, 0xffffffff },
7738                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7739                         0x00000000, 0x00000003 },
7740                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7741                         0x00000000, 0xffffffff },
7742                 { RCVDBDI_STD_BD+0, 0x0000,
7743                         0x00000000, 0xffffffff },
7744                 { RCVDBDI_STD_BD+4, 0x0000,
7745                         0x00000000, 0xffffffff },
7746                 { RCVDBDI_STD_BD+8, 0x0000,
7747                         0x00000000, 0xffff0002 },
7748                 { RCVDBDI_STD_BD+0xc, 0x0000,
7749                         0x00000000, 0xffffffff },
7750         
7751                 /* Receive BD Initiator Control Registers. */
7752                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7753                         0x00000000, 0xffffffff },
7754                 { RCVBDI_STD_THRESH, TG3_FL_5705,
7755                         0x00000000, 0x000003ff },
7756                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7757                         0x00000000, 0xffffffff },
7758
7759                 /* Host Coalescing Control Registers. */
7760                 { HOSTCC_MODE, TG3_FL_NOT_5705,
7761                         0x00000000, 0x00000004 },
7762                 { HOSTCC_MODE, TG3_FL_5705,
7763                         0x00000000, 0x000000f6 },
7764                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7765                         0x00000000, 0xffffffff },
7766                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7767                         0x00000000, 0x000003ff },
7768                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7769                         0x00000000, 0xffffffff },
7770                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7771                         0x00000000, 0x000003ff },
7772                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7773                         0x00000000, 0xffffffff },
7774                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7775                         0x00000000, 0x000000ff },
7776                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7777                         0x00000000, 0xffffffff },
7778                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7779                         0x00000000, 0x000000ff },
7780                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7781                         0x00000000, 0xffffffff },
7782                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7783                         0x00000000, 0xffffffff },
7784                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7785                         0x00000000, 0xffffffff },
7786                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7787                         0x00000000, 0x000000ff },
7788                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7789                         0x00000000, 0xffffffff },
7790                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7791                         0x00000000, 0x000000ff },
7792                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7793                         0x00000000, 0xffffffff },
7794                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7795                         0x00000000, 0xffffffff },
7796                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7797                         0x00000000, 0xffffffff },
7798                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7799                         0x00000000, 0xffffffff },
7800                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7801                         0x00000000, 0xffffffff },
7802                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7803                         0xffffffff, 0x00000000 },
7804                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7805                         0xffffffff, 0x00000000 },
7806
7807                 /* Buffer Manager Control Registers. */
7808                 { BUFMGR_MB_POOL_ADDR, 0x0000,
7809                         0x00000000, 0x007fff80 },
7810                 { BUFMGR_MB_POOL_SIZE, 0x0000,
7811                         0x00000000, 0x007fffff },
7812                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7813                         0x00000000, 0x0000003f },
7814                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7815                         0x00000000, 0x000001ff },
7816                 { BUFMGR_MB_HIGH_WATER, 0x0000,
7817                         0x00000000, 0x000001ff },
7818                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7819                         0xffffffff, 0x00000000 },
7820                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7821                         0xffffffff, 0x00000000 },
7822
7823                 /* Mailbox Registers */
7824                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7825                         0x00000000, 0x000001ff },
7826                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7827                         0x00000000, 0x000001ff },
7828                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7829                         0x00000000, 0x000007ff },
7830                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7831                         0x00000000, 0x000001ff },
7832
7833                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7834         };
7835
7836         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7837                 is_5705 = 1;
7838         else
7839                 is_5705 = 0;
7840
7841         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
7842                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7843                         continue;
7844
7845                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7846                         continue;
7847
7848                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7849                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
7850                         continue;
7851
7852                 offset = (u32) reg_tbl[i].offset;
7853                 read_mask = reg_tbl[i].read_mask;
7854                 write_mask = reg_tbl[i].write_mask;
7855
7856                 /* Save the original register content */
7857                 save_val = tr32(offset);
7858
7859                 /* Determine the read-only value. */
7860                 read_val = save_val & read_mask;
7861
7862                 /* Write zero to the register, then make sure the read-only bits
7863                  * are not changed and the read/write bits are all zeros.
7864                  */
7865                 tw32(offset, 0);
7866
7867                 val = tr32(offset);
7868
7869                 /* Test the read-only and read/write bits. */
7870                 if (((val & read_mask) != read_val) || (val & write_mask))
7871                         goto out;
7872
7873                 /* Write ones to all the bits defined by RdMask and WrMask, then
7874                  * make sure the read-only bits are not changed and the
7875                  * read/write bits are all ones.
7876                  */
7877                 tw32(offset, read_mask | write_mask);
7878
7879                 val = tr32(offset);
7880
7881                 /* Test the read-only bits. */
7882                 if ((val & read_mask) != read_val)
7883                         goto out;
7884
7885                 /* Test the read/write bits. */
7886                 if ((val & write_mask) != write_mask)
7887                         goto out;
7888
7889                 tw32(offset, save_val);
7890         }
7891
7892         return 0;
7893
7894 out:
7895         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
7896         tw32(offset, save_val);
7897         return -EIO;
7898 }
7899
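     /* Exercise a window of NIC-internal memory: for each test pattern
      * (all zeros, all ones, 0xaa55a55a) write every dword in the range
      * through tg3_write_mem() and read it back; any mismatch fails the
      * test.
      */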
7900 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
7901 {
7902         static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7903         int i;
7904         u32 j;
7905
7906         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
7907                 for (j = 0; j < len; j += 4) {
7908                         u32 val;
7909
7910                         tg3_write_mem(tp, offset + j, test_pattern[i]);
7911                         tg3_read_mem(tp, offset + j, &val);
7912                         if (val != test_pattern[i])
7913                                 return -EIO;
7914                 }
7915         }
7916         return 0;
7917 }
7918
7919 static int tg3_test_memory(struct tg3 *tp)
7920 {
7921         static struct mem_entry {
7922                 u32 offset;
7923                 u32 len;
7924         } mem_tbl_570x[] = {
7925                 { 0x00000000, 0x01000},
7926                 { 0x00002000, 0x1c000},
7927                 { 0xffffffff, 0x00000}
7928         }, mem_tbl_5705[] = {
7929                 { 0x00000100, 0x0000c},
7930                 { 0x00000200, 0x00008},
7931                 { 0x00000b50, 0x00400},
7932                 { 0x00004000, 0x00800},
7933                 { 0x00006000, 0x01000},
7934                 { 0x00008000, 0x02000},
7935                 { 0x00010000, 0x0e000},
7936                 { 0xffffffff, 0x00000}
7937         };
7938         struct mem_entry *mem_tbl;
7939         int err = 0;
7940         int i;
7941
7942         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7943                 mem_tbl = mem_tbl_5705;
7944         else
7945                 mem_tbl = mem_tbl_570x;
7946
7947         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
7948                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
7949                     mem_tbl[i].len)) != 0)
7950                         break;
7951         }
7952
7953         return err;
7954 }
7955
7956 #define TG3_MAC_LOOPBACK        0
7957 #define TG3_PHY_LOOPBACK        1
7958
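     /* Run a single loopback iteration: build a 1514-byte frame addressed
      * to our own MAC with a simple counting-byte payload, queue it on the
      * send ring, kick the mailbox, then poll the status block until the
      * TX consumer and RX producer indices show the frame went out and
      * came back.  The received copy on the standard RX ring is then
      * compared byte-for-byte against what was sent.  MAC loopback wraps
      * the frame inside the MAC (MAC_MODE_PORT_INT_LPBACK); PHY loopback
      * sets BMCR_LOOPBACK on the transceiver instead.
      */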
7959 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
7960 {
7961         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
7962         u32 desc_idx;
7963         struct sk_buff *skb, *rx_skb;
7964         u8 *tx_data;
7965         dma_addr_t map;
7966         int num_pkts, tx_len, rx_len, i, err;
7967         struct tg3_rx_buffer_desc *desc;
7968
7969         if (loopback_mode == TG3_MAC_LOOPBACK) {
7970                 /* HW erratum: MAC loopback fails in some cases on 5780.
7971                  * Normal traffic and PHY loopback are not affected by
7972                  * this erratum.
7973                  */
7974                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
7975                         return 0;
7976
7977                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7978                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7979                            MAC_MODE_PORT_MODE_GMII;
7980                 tw32(MAC_MODE, mac_mode);
7981         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
7982                 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
7983                                            BMCR_SPEED1000);
7984                 udelay(40);
7985                 /* Reset the RX MAC to avoid intermittently losing the 1st rx packet. */
7986                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7987                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7988                         udelay(10);
7989                         tw32_f(MAC_RX_MODE, tp->rx_mode);
7990                 }
7991                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7992                            MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
7993                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
7994                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7995                 tw32(MAC_MODE, mac_mode);
7996         }
7997         else
7998                 return -EINVAL;
7999
8000         err = -EIO;
8001
8002         tx_len = 1514;
8003         skb = dev_alloc_skb(tx_len);
             if (!skb)
                     return -ENOMEM;
8004         tx_data = skb_put(skb, tx_len);
8005         memcpy(tx_data, tp->dev->dev_addr, 6);
8006         memset(tx_data + 6, 0x0, 8);
8007
8008         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8009
8010         for (i = 14; i < tx_len; i++)
8011                 tx_data[i] = (u8) (i & 0xff);
8012
8013         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8014
8015         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8016              HOSTCC_MODE_NOW);
8017
8018         udelay(10);
8019
8020         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8021
8022         num_pkts = 0;
8023
8024         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8025
8026         tp->tx_prod++;
8027         num_pkts++;
8028
8029         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8030                      tp->tx_prod);
8031         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8032
8033         udelay(10);
8034
8035         for (i = 0; i < 10; i++) {
8036                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8037                        HOSTCC_MODE_NOW);
8038
8039                 udelay(10);
8040
8041                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8042                 rx_idx = tp->hw_status->idx[0].rx_producer;
8043                 if ((tx_idx == tp->tx_prod) &&
8044                     (rx_idx == (rx_start_idx + num_pkts)))
8045                         break;
8046         }
8047
8048         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8049         dev_kfree_skb(skb);
8050
8051         if (tx_idx != tp->tx_prod)
8052                 goto out;
8053
8054         if (rx_idx != rx_start_idx + num_pkts)
8055                 goto out;
8056
8057         desc = &tp->rx_rcb[rx_start_idx];
8058         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8059         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8060         if (opaque_key != RXD_OPAQUE_RING_STD)
8061                 goto out;
8062
8063         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8064             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8065                 goto out;
8066
8067         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8068         if (rx_len != tx_len)
8069                 goto out;
8070
8071         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8072
8073         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8074         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8075
8076         for (i = 14; i < tx_len; i++) {
8077                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8078                         goto out;
8079         }
8080         err = 0;
8081
8082         /* tg3_free_rings will unmap and free the rx_skb */
8083 out:
8084         return err;
8085 }
8086
8087 #define TG3_MAC_LOOPBACK_FAILED         1
8088 #define TG3_PHY_LOOPBACK_FAILED         2
8089 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8090                                          TG3_PHY_LOOPBACK_FAILED)
8091
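     /* Reset and re-initialize the hardware, then run MAC loopback
      * unconditionally and PHY loopback only on copper (non-SERDES)
      * devices.  Returns a bitmask of the TG3_*_LOOPBACK_FAILED flags.
      */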
8092 static int tg3_test_loopback(struct tg3 *tp)
8093 {
8094         int err = 0;
8095
8096         if (!netif_running(tp->dev))
8097                 return TG3_LOOPBACK_FAILED;
8098
8099         tg3_reset_hw(tp);
8100
8101         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8102                 err |= TG3_MAC_LOOPBACK_FAILED;
8103         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8104                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8105                         err |= TG3_PHY_LOOPBACK_FAILED;
8106         }
8107
8108         return err;
8109 }
8110
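     /* ethtool self-test entry point.  Result slots, as filled in below:
      * data[0] NVRAM, data[1] link, data[2] registers, data[3] memory,
      * data[4] loopback, data[5] interrupt.  The offline tests halt the
      * chip and the RX CPU (plus the TX CPU on pre-5705 parts), then
      * re-run tg3_init_hw() if the interface was up.
      */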
8111 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8112                           u64 *data)
8113 {
8114         struct tg3 *tp = netdev_priv(dev);
8115
8116         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8117
8118         if (tg3_test_nvram(tp) != 0) {
8119                 etest->flags |= ETH_TEST_FL_FAILED;
8120                 data[0] = 1;
8121         }
8122         if (tg3_test_link(tp) != 0) {
8123                 etest->flags |= ETH_TEST_FL_FAILED;
8124                 data[1] = 1;
8125         }
8126         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8127                 int irq_sync = 0;
8128
8129                 if (netif_running(dev)) {
8130                         tg3_netif_stop(tp);
8131                         irq_sync = 1;
8132                 }
8133
8134                 tg3_full_lock(tp, irq_sync);
8135
8136                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8137                 tg3_nvram_lock(tp);
8138                 tg3_halt_cpu(tp, RX_CPU_BASE);
8139                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8140                         tg3_halt_cpu(tp, TX_CPU_BASE);
8141                 tg3_nvram_unlock(tp);
8142
8143                 if (tg3_test_registers(tp) != 0) {
8144                         etest->flags |= ETH_TEST_FL_FAILED;
8145                         data[2] = 1;
8146                 }
8147                 if (tg3_test_memory(tp) != 0) {
8148                         etest->flags |= ETH_TEST_FL_FAILED;
8149                         data[3] = 1;
8150                 }
8151                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8152                         etest->flags |= ETH_TEST_FL_FAILED;
8153
8154                 tg3_full_unlock(tp);
8155
8156                 if (tg3_test_interrupt(tp) != 0) {
8157                         etest->flags |= ETH_TEST_FL_FAILED;
8158                         data[5] = 1;
8159                 }
8160
8161                 tg3_full_lock(tp, 0);
8162
8163                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8164                 if (netif_running(dev)) {
8165                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8166                         tg3_init_hw(tp);
8167                         tg3_netif_start(tp);
8168                 }
8169
8170                 tg3_full_unlock(tp);
8171         }
8172 }
8173
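     /* MII ioctl handler: SIOCGMIIREG/SIOCSMIIREG go through
      * tg3_readphy()/tg3_writephy() under tp->lock.  SERDES devices have
      * no MDIO-accessible PHY, so those requests fall through to
      * -EOPNOTSUPP, and register writes additionally require
      * CAP_NET_ADMIN.
      */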
8174 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8175 {
8176         struct mii_ioctl_data *data = if_mii(ifr);
8177         struct tg3 *tp = netdev_priv(dev);
8178         int err;
8179
8180         switch(cmd) {
8181         case SIOCGMIIPHY:
8182                 data->phy_id = PHY_ADDR;
8183
8184                 /* fallthru */
8185         case SIOCGMIIREG: {
8186                 u32 mii_regval;
8187
8188                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8189                         break;                  /* We have no PHY */
8190
8191                 spin_lock_bh(&tp->lock);
8192                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8193                 spin_unlock_bh(&tp->lock);
8194
8195                 data->val_out = mii_regval;
8196
8197                 return err;
8198         }
8199
8200         case SIOCSMIIREG:
8201                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8202                         break;                  /* We have no PHY */
8203
8204                 if (!capable(CAP_NET_ADMIN))
8205                         return -EPERM;
8206
8207                 spin_lock_bh(&tp->lock);
8208                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8209                 spin_unlock_bh(&tp->lock);
8210
8211                 return err;
8212
8213         default:
8214                 /* do nothing */
8215                 break;
8216         }
8217         return -EOPNOTSUPP;
8218 }
8219
8220 #if TG3_VLAN_TAG_USED
8221 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8222 {
8223         struct tg3 *tp = netdev_priv(dev);
8224
8225         tg3_full_lock(tp, 0);
8226
8227         tp->vlgrp = grp;
8228
8229         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8230         __tg3_set_rx_mode(dev);
8231
8232         tg3_full_unlock(tp);
8233 }
8234
8235 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8236 {
8237         struct tg3 *tp = netdev_priv(dev);
8238
8239         tg3_full_lock(tp, 0);
8240         if (tp->vlgrp)
8241                 tp->vlgrp->vlan_devices[vid] = NULL;
8242         tg3_full_unlock(tp);
8243 }
8244 #endif
8245
8246 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8247 {
8248         struct tg3 *tp = netdev_priv(dev);
8249
8250         memcpy(ec, &tp->coal, sizeof(*ec));
8251         return 0;
8252 }
8253
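     /* Validate the requested coalescing parameters against the chip
      * limits before copying them into tp->coal.  On 5705-and-newer parts
      * the per-interrupt tick and stats-block limits stay at zero, so any
      * nonzero request for those fields is rejected with -EINVAL.  As a
      * rough sketch, a user-space request such as
      *
      *   ethtool -C eth0 rx-usecs 20 rx-frames 5
      *
      * (hypothetical interface name) arrives here as
      * ec->rx_coalesce_usecs = 20 and ec->rx_max_coalesced_frames = 5.
      */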
8254 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8255 {
8256         struct tg3 *tp = netdev_priv(dev);
8257         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8258         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8259
8260         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8261                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8262                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8263                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8264                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8265         }
8266
8267         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8268             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8269             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8270             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8271             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8272             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8273             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8274             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8275             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8276             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8277                 return -EINVAL;
8278
8279         /* No rx interrupts will be generated if both are zero */
8280         if ((ec->rx_coalesce_usecs == 0) &&
8281             (ec->rx_max_coalesced_frames == 0))
8282                 return -EINVAL;
8283
8284         /* No tx interrupts will be generated if both are zero */
8285         if ((ec->tx_coalesce_usecs == 0) &&
8286             (ec->tx_max_coalesced_frames == 0))
8287                 return -EINVAL;
8288
8289         /* Only copy relevant parameters, ignore all others. */
8290         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8291         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8292         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8293         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8294         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8295         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8296         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8297         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8298         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8299
8300         if (netif_running(dev)) {
8301                 tg3_full_lock(tp, 0);
8302                 __tg3_set_coalesce(tp, &tp->coal);
8303                 tg3_full_unlock(tp);
8304         }
8305         return 0;
8306 }
8307
8308 static struct ethtool_ops tg3_ethtool_ops = {
8309         .get_settings           = tg3_get_settings,
8310         .set_settings           = tg3_set_settings,
8311         .get_drvinfo            = tg3_get_drvinfo,
8312         .get_regs_len           = tg3_get_regs_len,
8313         .get_regs               = tg3_get_regs,
8314         .get_wol                = tg3_get_wol,
8315         .set_wol                = tg3_set_wol,
8316         .get_msglevel           = tg3_get_msglevel,
8317         .set_msglevel           = tg3_set_msglevel,
8318         .nway_reset             = tg3_nway_reset,
8319         .get_link               = ethtool_op_get_link,
8320         .get_eeprom_len         = tg3_get_eeprom_len,
8321         .get_eeprom             = tg3_get_eeprom,
8322         .set_eeprom             = tg3_set_eeprom,
8323         .get_ringparam          = tg3_get_ringparam,
8324         .set_ringparam          = tg3_set_ringparam,
8325         .get_pauseparam         = tg3_get_pauseparam,
8326         .set_pauseparam         = tg3_set_pauseparam,
8327         .get_rx_csum            = tg3_get_rx_csum,
8328         .set_rx_csum            = tg3_set_rx_csum,
8329         .get_tx_csum            = ethtool_op_get_tx_csum,
8330         .set_tx_csum            = tg3_set_tx_csum,
8331         .get_sg                 = ethtool_op_get_sg,
8332         .set_sg                 = ethtool_op_set_sg,
8333 #if TG3_TSO_SUPPORT != 0
8334         .get_tso                = ethtool_op_get_tso,
8335         .set_tso                = tg3_set_tso,
8336 #endif
8337         .self_test_count        = tg3_get_test_count,
8338         .self_test              = tg3_self_test,
8339         .get_strings            = tg3_get_strings,
8340         .phys_id                = tg3_phys_id,
8341         .get_stats_count        = tg3_get_stats_count,
8342         .get_ethtool_stats      = tg3_get_ethtool_stats,
8343         .get_coalesce           = tg3_get_coalesce,
8344         .set_coalesce           = tg3_set_coalesce,
8345         .get_perm_addr          = ethtool_op_get_perm_addr,
8346 };
8347
8348 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8349 {
8350         u32 cursize, val;
8351
8352         tp->nvram_size = EEPROM_CHIP_SIZE;
8353
8354         if (tg3_nvram_read(tp, 0, &val) != 0)
8355                 return;
8356
8357         if (swab32(val) != TG3_EEPROM_MAGIC)
8358                 return;
8359
8360         /*
8361          * Size the chip by reading offsets at increasing powers of two.
8362          * When we encounter our validation signature, we know the addressing
8363          * has wrapped around, and thus have our chip size.
8364          */
8365         cursize = 0x800;
8366
8367         while (cursize < tp->nvram_size) {
8368                 if (tg3_nvram_read(tp, cursize, &val) != 0)
8369                         return;
8370
8371                 if (swab32(val) == TG3_EEPROM_MAGIC)
8372                         break;
8373
8374                 cursize <<= 1;
8375         }
8376
8377         tp->nvram_size = cursize;
8378 }
8379
8380 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8381 {
8382         u32 val;
8383
8384         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8385                 if (val != 0) {
8386                         tp->nvram_size = (val >> 16) * 1024;
8387                         return;
8388                 }
8389         }
8390         tp->nvram_size = 0x20000;
8391 }
8392
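     /* Decode NVRAM_CFG1 on non-5752 devices: select flash vs. legacy
      * EEPROM access, then map the vendor bits to a JEDEC manufacturer
      * id, page size and "buffered" flag used by the write paths.  Parts
      * other than the 5750/5780 class carry no vendor bits and default
      * to the Atmel AT45DB-style buffered-flash settings.
      */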
8393 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8394 {
8395         u32 nvcfg1;
8396
8397         nvcfg1 = tr32(NVRAM_CFG1);
8398         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8399                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8400         }
8401         else {
8402                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8403                 tw32(NVRAM_CFG1, nvcfg1);
8404         }
8405
8406         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8407             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8408                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8409                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8410                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8411                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8412                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8413                                 break;
8414                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8415                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8416                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8417                                 break;
8418                         case FLASH_VENDOR_ATMEL_EEPROM:
8419                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8420                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8421                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8422                                 break;
8423                         case FLASH_VENDOR_ST:
8424                                 tp->nvram_jedecnum = JEDEC_ST;
8425                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8426                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8427                                 break;
8428                         case FLASH_VENDOR_SAIFUN:
8429                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8430                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8431                                 break;
8432                         case FLASH_VENDOR_SST_SMALL:
8433                         case FLASH_VENDOR_SST_LARGE:
8434                                 tp->nvram_jedecnum = JEDEC_SST;
8435                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8436                                 break;
8437                 }
8438         }
8439         else {
8440                 tp->nvram_jedecnum = JEDEC_ATMEL;
8441                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8442                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8443         }
8444 }
8445
8446 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8447 {
8448         u32 nvcfg1;
8449
8450         nvcfg1 = tr32(NVRAM_CFG1);
8451
8452         /* NVRAM protection for TPM */
8453         if (nvcfg1 & (1 << 27))
8454                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8455
8456         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8457                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8458                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8459                         tp->nvram_jedecnum = JEDEC_ATMEL;
8460                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8461                         break;
8462                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8463                         tp->nvram_jedecnum = JEDEC_ATMEL;
8464                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8465                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8466                         break;
8467                 case FLASH_5752VENDOR_ST_M45PE10:
8468                 case FLASH_5752VENDOR_ST_M45PE20:
8469                 case FLASH_5752VENDOR_ST_M45PE40:
8470                         tp->nvram_jedecnum = JEDEC_ST;
8471                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8472                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8473                         break;
8474         }
8475
8476         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8477                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8478                         case FLASH_5752PAGE_SIZE_256:
8479                                 tp->nvram_pagesize = 256;
8480                                 break;
8481                         case FLASH_5752PAGE_SIZE_512:
8482                                 tp->nvram_pagesize = 512;
8483                                 break;
8484                         case FLASH_5752PAGE_SIZE_1K:
8485                                 tp->nvram_pagesize = 1024;
8486                                 break;
8487                         case FLASH_5752PAGE_SIZE_2K:
8488                                 tp->nvram_pagesize = 2048;
8489                                 break;
8490                         case FLASH_5752PAGE_SIZE_4K:
8491                                 tp->nvram_pagesize = 4096;
8492                                 break;
8493                         case FLASH_5752PAGE_SIZE_264:
8494                                 tp->nvram_pagesize = 264;
8495                                 break;
8496                 }
8497         }
8498         else {
8499                 /* For eeprom, set pagesize to maximum eeprom size */
8500                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8501
8502                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8503                 tw32(NVRAM_CFG1, nvcfg1);
8504         }
8505 }
8506
8507 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
8508 static void __devinit tg3_nvram_init(struct tg3 *tp)
8509 {
8510         int j;
8511
8512         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8513                 return;
8514
8515         tw32_f(GRC_EEPROM_ADDR,
8516              (EEPROM_ADDR_FSM_RESET |
8517               (EEPROM_DEFAULT_CLOCK_PERIOD <<
8518                EEPROM_ADDR_CLKPERD_SHIFT)));
8519
8520         /* XXX schedule_timeout() ... */
8521         for (j = 0; j < 100; j++)
8522                 udelay(10);
8523
8524         /* Enable seeprom accesses. */
8525         tw32_f(GRC_LOCAL_CTRL,
8526              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8527         udelay(100);
8528
8529         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8530             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8531                 tp->tg3_flags |= TG3_FLAG_NVRAM;
8532
8533                 tg3_enable_nvram_access(tp);
8534
8535                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8536                         tg3_get_5752_nvram_info(tp);
8537                 else
8538                         tg3_get_nvram_info(tp);
8539
8540                 tg3_get_nvram_size(tp);
8541
8542                 tg3_disable_nvram_access(tp);
8543
8544         } else {
8545                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8546
8547                 tg3_get_eeprom_size(tp);
8548         }
8549 }
8550
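     /* Read one dword through the legacy serial-EEPROM interface (used
      * when the NVRAM block is unavailable, e.g. on 5700/5701): program
      * the dword address into GRC_EEPROM_ADDR with READ|START set, poll
      * for EEPROM_ADDR_COMPLETE (up to 10000 polls of 100us, roughly one
      * second), then latch the result from GRC_EEPROM_DATA.
      */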
8551 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8552                                         u32 offset, u32 *val)
8553 {
8554         u32 tmp;
8555         int i;
8556
8557         if (offset > EEPROM_ADDR_ADDR_MASK ||
8558             (offset % 4) != 0)
8559                 return -EINVAL;
8560
8561         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8562                                         EEPROM_ADDR_DEVID_MASK |
8563                                         EEPROM_ADDR_READ);
8564         tw32(GRC_EEPROM_ADDR,
8565              tmp |
8566              (0 << EEPROM_ADDR_DEVID_SHIFT) |
8567              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8568               EEPROM_ADDR_ADDR_MASK) |
8569              EEPROM_ADDR_READ | EEPROM_ADDR_START);
8570
8571         for (i = 0; i < 10000; i++) {
8572                 tmp = tr32(GRC_EEPROM_ADDR);
8573
8574                 if (tmp & EEPROM_ADDR_COMPLETE)
8575                         break;
8576                 udelay(100);
8577         }
8578         if (!(tmp & EEPROM_ADDR_COMPLETE))
8579                 return -EBUSY;
8580
8581         *val = tr32(GRC_EEPROM_DATA);
8582         return 0;
8583 }
8584
8585 #define NVRAM_CMD_TIMEOUT 10000
8586
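     /* Kick a command into NVRAM_CMD and poll for NVRAM_CMD_DONE, giving
      * up after NVRAM_CMD_TIMEOUT polls of 10us each (about 100ms).
      */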
8587 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8588 {
8589         int i;
8590
8591         tw32(NVRAM_CMD, nvram_cmd);
8592         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8593                 udelay(10);
8594                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8595                         udelay(10);
8596                         break;
8597                 }
8598         }
8599         if (i == NVRAM_CMD_TIMEOUT) {
8600                 return -EBUSY;
8601         }
8602         return 0;
8603 }
8604
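     /* Read one dword of NVRAM.  For Atmel buffered flash the linear
      * offset is first translated into the device's page/byte addressing
      * before being written to NVRAM_ADDR.  A worked example of the
      * conversion below, assuming a 264-byte page and a page shift
      * (ATMEL_AT45DB0X1B_PAGE_POS) of 9:
      *
      *   offset 1000  ->  page 3, byte 208  ->  (3 << 9) + 208 = 1744
      */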
8605 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8606 {
8607         int ret;
8608
8609         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8610                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8611                 return -EINVAL;
8612         }
8613
8614         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8615                 return tg3_nvram_read_using_eeprom(tp, offset, val);
8616
8617         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8618                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8619                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8620
8621                 offset = ((offset / tp->nvram_pagesize) <<
8622                           ATMEL_AT45DB0X1B_PAGE_POS) +
8623                         (offset % tp->nvram_pagesize);
8624         }
8625
8626         if (offset > NVRAM_ADDR_MSK)
8627                 return -EINVAL;
8628
8629         tg3_nvram_lock(tp);
8630
8631         tg3_enable_nvram_access(tp);
8632
8633         tw32(NVRAM_ADDR, offset);
8634         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8635                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8636
8637         if (ret == 0)
8638                 *val = swab32(tr32(NVRAM_RDDATA));
8639
8640         tg3_nvram_unlock(tp);
8641
8642         tg3_disable_nvram_access(tp);
8643
8644         return ret;
8645 }
8646
8647 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8648                                     u32 offset, u32 len, u8 *buf)
8649 {
8650         int i, j, rc = 0;
8651         u32 val;
8652
8653         for (i = 0; i < len; i += 4) {
8654                 u32 addr, data;
8655
8656                 addr = offset + i;
8657
8658                 memcpy(&data, buf + i, 4);
8659
8660                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8661
8662                 val = tr32(GRC_EEPROM_ADDR);
8663                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8664
8665                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8666                         EEPROM_ADDR_READ);
8667                 tw32(GRC_EEPROM_ADDR, val |
8668                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
8669                         (addr & EEPROM_ADDR_ADDR_MASK) |
8670                         EEPROM_ADDR_START |
8671                         EEPROM_ADDR_WRITE);
8672
8673                 for (j = 0; j < 10000; j++) {
8674                         val = tr32(GRC_EEPROM_ADDR);
8675
8676                         if (val & EEPROM_ADDR_COMPLETE)
8677                                 break;
8678                         udelay(100);
8679                 }
8680                 if (!(val & EEPROM_ADDR_COMPLETE)) {
8681                         rc = -EBUSY;
8682                         break;
8683                 }
8684         }
8685
8686         return rc;
8687 }
8688
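     /* Unbuffered flash parts are programmed a full page at a time, so
      * each pass of the loop below is a read-modify-write: read the whole
      * page into a bounce buffer, merge in the caller's data, issue a
      * write-enable, erase the page, then reprogram it dword by dword
      * with NVRAM_CMD_FIRST/LAST framing the page.
      */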
8689 /* offset and length are dword aligned */
8690 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8691                 u8 *buf)
8692 {
8693         int ret = 0;
8694         u32 pagesize = tp->nvram_pagesize;
8695         u32 pagemask = pagesize - 1;
8696         u32 nvram_cmd;
8697         u8 *tmp;
8698
8699         tmp = kmalloc(pagesize, GFP_KERNEL);
8700         if (tmp == NULL)
8701                 return -ENOMEM;
8702
8703         while (len) {
8704                 int j;
8705                 u32 phy_addr, page_off, size;
8706
8707                 phy_addr = offset & ~pagemask;
8708
8709                 for (j = 0; j < pagesize; j += 4) {
8710                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
8711                                                 (u32 *) (tmp + j))))
8712                                 break;
8713                 }
8714                 if (ret)
8715                         break;
8716
8717                 page_off = offset & pagemask;
8718                 size = pagesize;
8719                 if (len < size)
8720                         size = len;
8721
8722                 len -= size;
8723
8724                 memcpy(tmp + page_off, buf, size);
8725
8726                 offset = offset + (pagesize - page_off);
8727
8728                 tg3_enable_nvram_access(tp);
8729
8730                 /*
8731                  * Before we can erase the flash page, we need
8732                  * to issue a special "write enable" command.
8733                  */
8734                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8735
8736                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8737                         break;
8738
8739                 /* Erase the target page */
8740                 tw32(NVRAM_ADDR, phy_addr);
8741
8742                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8743                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8744
8745                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8746                         break;
8747
8748                 /* Issue another write enable to start the write. */
8749                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8750
8751                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8752                         break;
8753
8754                 for (j = 0; j < pagesize; j += 4) {
8755                         u32 data;
8756
8757                         data = *((u32 *) (tmp + j));
8758                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
8759
8760                         tw32(NVRAM_ADDR, phy_addr + j);
8761
8762                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
8763                                 NVRAM_CMD_WR;
8764
8765                         if (j == 0)
8766                                 nvram_cmd |= NVRAM_CMD_FIRST;
8767                         else if (j == (pagesize - 4))
8768                                 nvram_cmd |= NVRAM_CMD_LAST;
8769
8770                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8771                                 break;
8772                 }
8773                 if (ret)
8774                         break;
8775         }
8776
8777         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8778         tg3_nvram_exec_cmd(tp, nvram_cmd);
8779
8780         kfree(tmp);
8781
8782         return ret;
8783 }
8784
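     /* Buffered flash and EEPROM parts are written in place with no
      * explicit erase: NVRAM_CMD_FIRST/LAST mark the page (or, for
      * EEPROM, dword) boundaries, and non-5752 ST parts get a
      * write-enable command at the start of every page.
      */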
8785 /* offset and length are dword aligned */
8786 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8787                 u8 *buf)
8788 {
8789         int i, ret = 0;
8790
8791         for (i = 0; i < len; i += 4, offset += 4) {
8792                 u32 data, page_off, phy_addr, nvram_cmd;
8793
8794                 memcpy(&data, buf + i, 4);
8795                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8796
8797                 page_off = offset % tp->nvram_pagesize;
8798
8799                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8800                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8801
8802                         phy_addr = ((offset / tp->nvram_pagesize) <<
8803                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
8804                 }
8805                 else {
8806                         phy_addr = offset;
8807                 }
8808
8809                 tw32(NVRAM_ADDR, phy_addr);
8810
8811                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
8812
8813                 if ((page_off == 0) || (i == 0))
8814                         nvram_cmd |= NVRAM_CMD_FIRST;
8815                 else if (page_off == (tp->nvram_pagesize - 4))
8816                         nvram_cmd |= NVRAM_CMD_LAST;
8817
8818                 if (i == (len - 4))
8819                         nvram_cmd |= NVRAM_CMD_LAST;
8820
8821                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
8822                     (tp->nvram_jedecnum == JEDEC_ST) &&
8823                     (nvram_cmd & NVRAM_CMD_FIRST)) {
8824
8825                         if ((ret = tg3_nvram_exec_cmd(tp,
8826                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
8827                                 NVRAM_CMD_DONE)))
8828
8829                                 break;
8830                 }
8831                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8832                         /* We always do complete word writes to eeprom. */
8833                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
8834                 }
8835
8836                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8837                         break;
8838         }
8839         return ret;
8840 }
8841
8842 /* offset and length are dword aligned */
8843 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
8844 {
8845         int ret;
8846
8847         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8848                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
8849                 return -EINVAL;
8850         }
8851
8852         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8853                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
8854                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
8855                 udelay(40);
8856         }
8857
8858         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
8859                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
8860         }
8861         else {
8862                 u32 grc_mode;
8863
8864                 tg3_nvram_lock(tp);
8865
8866                 tg3_enable_nvram_access(tp);
8867                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
8868                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
8869                         tw32(NVRAM_WRITE1, 0x406);
8870
8871                 grc_mode = tr32(GRC_MODE);
8872                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
8873
8874                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
8875                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8876
8877                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
8878                                 buf);
8879                 }
8880                 else {
8881                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
8882                                 buf);
8883                 }
8884
8885                 grc_mode = tr32(GRC_MODE);
8886                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
8887
8888                 tg3_disable_nvram_access(tp);
8889                 tg3_nvram_unlock(tp);
8890         }
8891
8892         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8893                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8894                 udelay(40);
8895         }
8896
8897         return ret;
8898 }
8899
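     /* Fallback table mapping PCI subsystem vendor/device IDs to PHY IDs
      * for boards whose NVRAM carries no usable signature; a phy_id of 0
      * marks a fiber/SERDES board (see tg3_phy_probe()).
      */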
8900 struct subsys_tbl_ent {
8901         u16 subsys_vendor, subsys_devid;
8902         u32 phy_id;
8903 };
8904
8905 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
8906         /* Broadcom boards. */
8907         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
8908         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
8909         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
8910         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
8911         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
8912         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
8913         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
8914         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
8915         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
8916         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
8917         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
8918
8919         /* 3com boards. */
8920         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
8921         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
8922         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
8923         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
8924         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
8925
8926         /* DELL boards. */
8927         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
8928         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
8929         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
8930         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
8931
8932         /* Compaq boards. */
8933         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
8934         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
8935         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
8936         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
8937         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
8938
8939         /* IBM boards. */
8940         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
8941 };
8942
8943 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
8944 {
8945         int i;
8946
8947         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
8948                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
8949                      tp->pdev->subsystem_vendor) &&
8950                     (subsys_id_to_phy_id[i].subsys_devid ==
8951                      tp->pdev->subsystem_device))
8952                         return &subsys_id_to_phy_id[i];
8953         }
8954         return NULL;
8955 }
8956
8957 /* Since this function may be called in D3-hot power state during
8958  * tg3_init_one(), only config cycles are allowed.
8959  */
8960 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
8961 {
8962         u32 val;
8963
8964         /* Make sure register accesses (indirect or otherwise)
8965          * will function correctly.
8966          */
8967         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8968                                tp->misc_host_ctrl);
8969
8970         tp->phy_id = PHY_ID_INVALID;
8971         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8972
8973         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8974         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8975                 u32 nic_cfg, led_cfg;
8976                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
8977                 int eeprom_phy_serdes = 0;
8978
8979                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8980                 tp->nic_sram_data_cfg = nic_cfg;
8981
8982                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
8983                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
8984                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8985                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8986                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
8987                     (ver > 0) && (ver < 0x100))
8988                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
8989
8990                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
8991                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
8992                         eeprom_phy_serdes = 1;
8993
8994                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
8995                 if (nic_phy_id != 0) {
8996                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
8997                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
8998
8999                         eeprom_phy_id  = (id1 >> 16) << 10;
9000                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9001                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9002                 } else
9003                         eeprom_phy_id = 0;
9004
9005                 tp->phy_id = eeprom_phy_id;
9006                 if (eeprom_phy_serdes) {
9007                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9008                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9009                         else
9010                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9011                 }
9012
9013                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9014                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9015                                     SHASTA_EXT_LED_MODE_MASK);
9016                 else
9017                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9018
9019                 switch (led_cfg) {
9020                 default:
9021                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9022                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9023                         break;
9024
9025                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9026                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9027                         break;
9028
9029                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9030                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9031
9032                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9033                          * read with some older 5700/5701 bootcode.
9034                          */
9035                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9036                             ASIC_REV_5700 ||
9037                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9038                             ASIC_REV_5701)
9039                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9040
9041                         break;
9042
9043                 case SHASTA_EXT_LED_SHARED:
9044                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9045                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9046                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9047                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9048                                                  LED_CTRL_MODE_PHY_2);
9049                         break;
9050
9051                 case SHASTA_EXT_LED_MAC:
9052                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9053                         break;
9054
9055                 case SHASTA_EXT_LED_COMBO:
9056                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9057                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9058                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9059                                                  LED_CTRL_MODE_PHY_2);
9060                         break;
9061
9062                 }
9063
9064                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9065                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9066                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9067                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9068
9069                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9070                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9071                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9072                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9073
9074                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9075                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9076                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9077                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9078                 }
9079                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9080                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9081
9082                 if (cfg2 & (1 << 17))
9083                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9084
9085                 /* SERDES signal pre-emphasis in register 0x590 is set by
9086                  * the bootcode if bit 18 is set. */
9087                 if (cfg2 & (1 << 18))
9088                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9089         }
9090 }
9091
9092 static int __devinit tg3_phy_probe(struct tg3 *tp)
9093 {
9094         u32 hw_phy_id_1, hw_phy_id_2;
9095         u32 hw_phy_id, hw_phy_id_masked;
9096         int err;
9097
9098         /* Reading the PHY ID register can conflict with ASF
9099          * firmware access to the PHY hardware.
9100          */
9101         err = 0;
9102         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9103                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9104         } else {
9105                 /* Now read the physical PHY_ID from the chip and verify
9106                  * that it is sane.  If it doesn't look good, we fall back
9107                  * to the PHY ID found in the EEPROM area and, failing
9108                  * that, to the hard-coded subsystem-ID table.
9109                  */
9110                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9111                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9112
9113                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9114                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9115                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9116
9117                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9118         }
9119
9120         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9121                 tp->phy_id = hw_phy_id;
9122                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9123                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9124                 else
9125                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9126         } else {
9127                 if (tp->phy_id != PHY_ID_INVALID) {
9128                         /* Do nothing, phy ID already set up in
9129                          * tg3_get_eeprom_hw_cfg().
9130                          */
9131                 } else {
9132                         struct subsys_tbl_ent *p;
9133
9134                         /* No eeprom signature?  Try the hardcoded
9135                          * subsys device table.
9136                          */
9137                         p = lookup_by_subsys(tp);
9138                         if (!p)
9139                                 return -ENODEV;
9140
9141                         tp->phy_id = p->phy_id;
9142                         if (!tp->phy_id ||
9143                             tp->phy_id == PHY_ID_BCM8002)
9144                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9145                 }
9146         }
9147
9148         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9149             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9150                 u32 bmsr, adv_reg, tg3_ctrl;
9151
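                /* BMSR latches link-down events, so read it twice.  If the
                 * link is already up, the PHY reset and autoneg setup below
                 * are skipped.
                 */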
9152                 tg3_readphy(tp, MII_BMSR, &bmsr);
9153                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9154                     (bmsr & BMSR_LSTATUS))
9155                         goto skip_phy_reset;
9156
9157                 err = tg3_phy_reset(tp);
9158                 if (err)
9159                         return err;
9160
9161                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9162                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9163                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9164                 tg3_ctrl = 0;
9165                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9166                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9167                                     MII_TG3_CTRL_ADV_1000_FULL);
9168                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9169                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9170                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9171                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9172                 }
9173
9174                 if (!tg3_copper_is_advertising_all(tp)) {
9175                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9176
9177                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9178                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9179
9180                         tg3_writephy(tp, MII_BMCR,
9181                                      BMCR_ANENABLE | BMCR_ANRESTART);
9182                 }
9183                 tg3_phy_set_wirespeed(tp);
9184
9185                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9186                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9187                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9188         }
9189
9190 skip_phy_reset:
9191         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9192                 err = tg3_init_5401phy_dsp(tp);
9193                 if (err)
9194                         return err;
9195         }
9196
9201         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9202                 tp->link_config.advertising =
9203                         (ADVERTISED_1000baseT_Half |
9204                          ADVERTISED_1000baseT_Full |
9205                          ADVERTISED_Autoneg |
9206                          ADVERTISED_FIBRE);
9207         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9208                 tp->link_config.advertising &=
9209                         ~(ADVERTISED_1000baseT_Half |
9210                           ADVERTISED_1000baseT_Full);
9211
9212         return err;
9213 }
9214
9215 static void __devinit tg3_read_partno(struct tg3 *tp)
9216 {
9217         unsigned char vpd_data[256];
9218         int i;
9219
9220         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9221                 /* Sun decided not to put the necessary bits in the
9222                  * NVRAM of their onboard tg3 parts :(
9223                  */
9224                 strcpy(tp->board_part_number, "Sun 570X");
9225                 return;
9226         }
9227
9228         for (i = 0; i < 256; i += 4) {
9229                 u32 tmp;
9230
9231                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9232                         goto out_not_found;
9233
9234                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9235                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9236                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9237                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9238         }
9239
9240         /* Now parse and find the part number. */
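        /* The VPD area is a sequence of resource tags: 0x82 (identifier
         * string) and 0x91 (VPD-W) blocks are skipped, and the 0x90 (VPD-R)
         * block is scanned for the "PN" keyword that holds the part number.
         * Each large tag is followed by a little-endian 16-bit length.
         */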
9241         for (i = 0; i < 256; ) {
9242                 unsigned char val = vpd_data[i];
9243                 int block_end;
9244
9245                 if (val == 0x82 || val == 0x91) {
9246                         i = (i + 3 +
9247                              (vpd_data[i + 1] +
9248                               (vpd_data[i + 2] << 8)));
9249                         continue;
9250                 }
9251
9252                 if (val != 0x90)
9253                         goto out_not_found;
9254
9255                 block_end = (i + 3 +
9256                              (vpd_data[i + 1] +
9257                               (vpd_data[i + 2] << 8)));
9258                 i += 3;
9259                 while (i < block_end) {
9260                         if (vpd_data[i + 0] == 'P' &&
9261                             vpd_data[i + 1] == 'N') {
9262                                 int partno_len = vpd_data[i + 2];
9263
9264                                 if (partno_len > 24)
9265                                         goto out_not_found;
9266
9267                                 memcpy(tp->board_part_number,
9268                                        &vpd_data[i + 3],
9269                                        partno_len);
9270
9271                                 /* Success. */
9272                                 return;
9273                         }
                        i += 3 + vpd_data[i + 2];
9274                 }
9275
9276                 /* Part number not found. */
9277                 goto out_not_found;
9278         }
9279
9280 out_not_found:
9281         strcpy(tp->board_part_number, "none");
9282 }
9283
9284 #ifdef CONFIG_SPARC64
9285 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9286 {
9287         struct pci_dev *pdev = tp->pdev;
9288         struct pcidev_cookie *pcp = pdev->sysdata;
9289
9290         if (pcp != NULL) {
9291                 int node = pcp->prom_node;
9292                 u32 venid;
9293                 int err;
9294
9295                 err = prom_getproperty(node, "subsystem-vendor-id",
9296                                        (char *) &venid, sizeof(venid));
9297                 if (err == 0 || err == -1)
9298                         return 0;
9299                 if (venid == PCI_VENDOR_ID_SUN)
9300                         return 1;
9301         }
9302         return 0;
9303 }
9304 #endif
9305
9306 static int __devinit tg3_get_invariants(struct tg3 *tp)
9307 {
9308         static struct pci_device_id write_reorder_chipsets[] = {
9309                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9310                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9311                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9312                              PCI_DEVICE_ID_VIA_8385_0) },
9313                 { },
9314         };
9315         u32 misc_ctrl_reg;
9316         u32 cacheline_sz_reg;
9317         u32 pci_state_reg, grc_misc_cfg;
9318         u32 val;
9319         u16 pci_cmd;
9320         int err;
9321
9322 #ifdef CONFIG_SPARC64
9323         if (tg3_is_sun_570X(tp))
9324                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9325 #endif
9326
9327         /* Force memory write invalidate off.  If we leave it on,
9328          * then on 5700_BX chips we have to enable a workaround.
9329          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9330          * to match the cacheline size.  The Broadcom driver has this
9331          * workaround but turns MWI off all the time and so never uses
9332          * it.  This seems to suggest that the workaround is insufficient.
9333          */
9334         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9335         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9336         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9337
9338         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9339          * has the register indirect write enable bit set before
9340          * we try to access any of the MMIO registers.  It is also
9341          * critical that the PCI-X hw workaround situation is decided
9342          * before that.
9343          */
9344         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9345                               &misc_ctrl_reg);
9346
9347         tp->pci_chip_rev_id = (misc_ctrl_reg >>
9348                                MISC_HOST_CTRL_CHIPREV_SHIFT);
9349
9350         /* Wrong chip ID in 5752 A0. This code can be removed later
9351          * as A0 is not in production.
9352          */
9353         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9354                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9355
9356         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9357          * we need to disable memory and use config. cycles
9358          * only to access all registers. The 5702/03 chips
9359          * can mistakenly decode the special cycles from the
9360          * ICH chipsets as memory write cycles, causing corruption
9361          * of register and memory space. Only certain ICH bridges
9362          * will drive special cycles with non-zero data during the
9363          * address phase which can fall within the 5703's address
9364          * range. This is not an ICH bug as the PCI spec allows
9365          * non-zero address during special cycles. However, only
9366          * these ICH bridges are known to drive non-zero addresses
9367          * during special cycles.
9368          *
9369          * Since special cycles do not cross PCI bridges, we only
9370          * enable this workaround if the 5703 is on the secondary
9371          * bus of these ICH bridges.
9372          */
9373         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
9374             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
9375                 static struct tg3_dev_id {
9376                         u32     vendor;
9377                         u32     device;
9378                         u32     rev;
9379                 } ich_chipsets[] = {
9380                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
9381                           PCI_ANY_ID },
9382                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
9383                           PCI_ANY_ID },
9384                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
9385                           0xa },
9386                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
9387                           PCI_ANY_ID },
9388                         { },
9389                 };
9390                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
9391                 struct pci_dev *bridge = NULL;
9392
9393                 while (pci_id->vendor != 0) {
9394                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
9395                                                 bridge);
9396                         if (!bridge) {
9397                                 pci_id++;
9398                                 continue;
9399                         }
9400                         if (pci_id->rev != PCI_ANY_ID) {
9401                                 u8 rev;
9402
9403                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
9404                                                      &rev);
9405                                 if (rev > pci_id->rev)
9406                                         continue;
9407                         }
9408                         if (bridge->subordinate &&
9409                             (bridge->subordinate->number ==
9410                              tp->pdev->bus->number)) {
9411
9412                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
9413                                 pci_dev_put(bridge);
9414                                 break;
9415                         }
9416                 }
9417         }
9418
9419         /* Find msi capability. */
9420         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
9421             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9422                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
9423                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
9424         }
9425
9426         /* Initialize misc host control in PCI block. */
9427         tp->misc_host_ctrl |= (misc_ctrl_reg &
9428                                MISC_HOST_CTRL_CHIPREV);
9429         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9430                                tp->misc_host_ctrl);
9431
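        /* TG3PCI_CACHELINESZ packs the standard PCI cache line size,
         * latency timer, header type and BIST bytes into one dword.
         */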
9432         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9433                               &cacheline_sz_reg);
9434
9435         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
9436         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
9437         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
9438         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
9439
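        /* Establish the chip family flags: 5750, 5752 and the 5780 class
         * are "5750_PLUS"; those plus the 5705 are "5705_PLUS"; and
         * 5750_PLUS parts also get hardware TSO.
         */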
9440         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
9441             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
9442             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9443                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9444
9445         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
9446             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9447                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9448
9449         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9450                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
9451
9452         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9453             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9454             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
9455                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9456
9457         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9458                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9459
9460         /* If we have an AMD 762 or VIA K8T800 chipset, write
9461          * reordering to the mailbox registers done by the host
9462          * controller can cause major trouble.  We read back from
9463          * every mailbox register write to force the writes to be
9464          * posted to the chip in order.
9465          */
9466         if (pci_dev_present(write_reorder_chipsets) &&
9467             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9468                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9469
9470         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9471             tp->pci_lat_timer < 64) {
9472                 tp->pci_lat_timer = 64;
9473
9474                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
9475                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
9476                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
9477                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
9478
9479                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9480                                        cacheline_sz_reg);
9481         }
9482
9483         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9484                               &pci_state_reg);
9485
9486         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9487                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9488
9489                 /* If this is a 5700 BX chipset, and we are in PCI-X
9490                  * mode, enable the register write workaround.
9491                  *
9492                  * The workaround is to use indirect register accesses
9493                  * for all chip writes except those to the mailbox registers.
9494                  */
9495                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9496                         u32 pm_reg;
9497                         u16 pci_cmd;
9498
9499                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9500
9501                         /* The chip can have its power management PCI config
9502                          * space registers clobbered due to this bug.
9503                          * So explicitly force the chip into D0 here.
9504                          */
9505                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9506                                               &pm_reg);
9507                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9508                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9509                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9510                                                pm_reg);
9511
9512                         /* Also, force SERR#/PERR# in PCI command. */
9513                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9514                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9515                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9516                 }
9517         }
9518
9519         /* 5700 BX chips need to have their TX producer index mailboxes
9520          * written twice to workaround a bug.
9521          * written twice to work around a bug.
9522         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9523                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9524
9525         /* Back to back register writes can cause problems on this chip;
9526          * the workaround is to read back all reg writes except those to
9527          * mailbox regs.  See tg3_write_indirect_reg32().
9528          *
9529          * PCI Express 5750_A0 rev chips need this workaround too.
9530          */
9531         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9532             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9533              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9534                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9535
9536         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9537                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9538         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9539                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9540
9541         /* Chip-specific fixup from Broadcom driver */
9542         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9543             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9544                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9545                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9546         }
9547
9548         /* Default fast path register access methods */
9549         tp->read32 = tg3_read32;
9550         tp->write32 = tg3_write32;
9551         tp->read32_mbox = tg3_read32;
9552         tp->write32_mbox = tg3_write32;
9553         tp->write32_tx_mbox = tg3_write32;
9554         tp->write32_rx_mbox = tg3_write32;
9555
9556         /* Various workaround register access methods */
9557         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
9558                 tp->write32 = tg3_write_indirect_reg32;
9559         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
9560                 tp->write32 = tg3_write_flush_reg32;
9561
9562         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
9563             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
9564                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9565                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
9566                         tp->write32_rx_mbox = tg3_write_flush_reg32;
9567         }
9568
9569         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
9570                 tp->read32 = tg3_read_indirect_reg32;
9571                 tp->write32 = tg3_write_indirect_reg32;
9572                 tp->read32_mbox = tg3_read_indirect_mbox;
9573                 tp->write32_mbox = tg3_write_indirect_mbox;
9574                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
9575                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
9576
9577                 iounmap(tp->regs);
9578                 tp->regs = NULL;
9579
9580                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9581                 pci_cmd &= ~PCI_COMMAND_MEMORY;
9582                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9583         }
9584
9585         /* Get eeprom hw config before calling tg3_set_power_state().
9586          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9587          * determined before calling tg3_set_power_state() so that
9588          * we know whether or not to switch out of Vaux power.
9589          * When the flag is set, it means that GPIO1 is used for eeprom
9590          * write protect and also implies that it is a LOM where GPIOs
9591          * are not used to switch power.
9592          */ 
9593         tg3_get_eeprom_hw_cfg(tp);
9594
9595         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9596          * GPIO1 driven high will bring 5700's external PHY out of reset.
9597          * It is also used as eeprom write protect on LOMs.
9598          */
9599         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9600         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9601             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9602                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9603                                        GRC_LCLCTRL_GPIO_OUTPUT1);
9604         /* Unused GPIO3 must be driven as output on 5752 because there
9605          * are no pull-up resistors on unused GPIO pins.
9606          */
9607         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9608                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
9609
9610         /* Force the chip into D0. */
9611         err = tg3_set_power_state(tp, 0);
9612         if (err) {
9613                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9614                        pci_name(tp->pdev));
9615                 return err;
9616         }
9617
9618         /* 5700 B0 chips do not support checksumming correctly due
9619          * to hardware bugs.
9620          */
9621         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9622                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9623
9624         /* Pseudo-header checksum is done by hardware logic and not
9625          * the offload processors, so make the chip do the pseudo-
9626          * header checksums on receive.  For transmit it is more
9627          * convenient to do the pseudo-header checksum in software
9628          * as Linux does that on transmit for us in all cases.
9629          */
9630         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9631         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9632
9633         /* Derive initial jumbo mode from MTU assigned in
9634          * ether_setup() via the alloc_etherdev() call
9635          */
9636         if (tp->dev->mtu > ETH_DATA_LEN &&
9637             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9638                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
9639
9640         /* Determine WakeOnLan speed to use. */
9641         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9642             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9643             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
9644             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
9645                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
9646         } else {
9647                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
9648         }
9649
9650         /* A few boards don't want Ethernet@WireSpeed phy feature */
9651         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9652             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
9653              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
9654              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
9655             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9656                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
9657
9658         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
9659             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
9660                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
9661         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9662                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9663
9664         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
9665                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9666
9667         tp->coalesce_mode = 0;
9668         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
9669             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
9670                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
9671
9672         /* Initialize MAC MI mode, polling disabled. */
9673         tw32_f(MAC_MI_MODE, tp->mi_mode);
9674         udelay(80);
9675
9676         /* Initialize data/descriptor byte/word swapping. */
9677         val = tr32(GRC_MODE);
9678         val &= GRC_MODE_HOST_STACKUP;
9679         tw32(GRC_MODE, val | tp->grc_mode);
9680
9681         tg3_switch_clocks(tp);
9682
9683         /* Clear this out for sanity. */
9684         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9685
9686         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9687                               &pci_state_reg);
9688         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
9689             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
9690                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
9691
9692                 if (chiprevid == CHIPREV_ID_5701_A0 ||
9693                     chiprevid == CHIPREV_ID_5701_B0 ||
9694                     chiprevid == CHIPREV_ID_5701_B2 ||
9695                     chiprevid == CHIPREV_ID_5701_B5) {
9696                         void __iomem *sram_base;
9697
9698                         /* Write some dummy words into the SRAM status block
9699                          * area and see if they read back correctly.  If the
9700                          * value read back is bad, force enable the PCIX workaround.
9701                          */
9702                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
9703
9704                         writel(0x00000000, sram_base);
9705                         writel(0x00000000, sram_base + 4);
9706                         writel(0xffffffff, sram_base + 4);
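                        /* If writing the adjacent word disturbed the first
                         * one, the chip is exhibiting the PCI-X target bug.
                         */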
9707                         if (readl(sram_base) != 0x00000000)
9708                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9709                 }
9710         }
9711
9712         udelay(50);
9713         tg3_nvram_init(tp);
9714
9715         grc_misc_cfg = tr32(GRC_MISC_CFG);
9716         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
9717
9718         /* Broadcom's driver says that CIOBE multisplit has a bug */
9719 #if 0
9720         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9721             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
9722                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
9723                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
9724         }
9725 #endif
9726         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9727             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
9728              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
9729                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
9730
9731         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9732             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9733                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9734         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9735                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9736                                       HOSTCC_MODE_CLRTICK_TXBD);
9737
9738                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9739                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9740                                        tp->misc_host_ctrl);
9741         }
9742
9743         /* these are limited to 10/100 only */
9744         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9745              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
9746             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9747              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9748              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
9749               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
9750               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
9751             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9752              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
9753               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
9754                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
9755
9756         err = tg3_phy_probe(tp);
9757         if (err) {
9758                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
9759                        pci_name(tp->pdev), err);
9760                 /* ... but do not return immediately ... */
9761         }
9762
9763         tg3_read_partno(tp);
9764
9765         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
9766                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9767         } else {
9768                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9769                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
9770                 else
9771                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9772         }
9773
9774         /* 5700 {AX,BX} chips have a broken status block link
9775          * change bit implementation, so we must use the
9776          * status register in those cases.
9777          */
9778         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9779                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
9780         else
9781                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
9782
9783         /* The led_ctrl is set during tg3_phy_probe; here we might
9784          * have to force the link status polling mechanism based
9785          * upon subsystem IDs.
9786          */
9787         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
9788             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9789                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
9790                                   TG3_FLAG_USE_LINKCHG_REG);
9791         }
9792
9793         /* For all SERDES we poll the MAC status register. */
9794         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9795                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
9796         else
9797                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9798
9799         /* It seems all chips can get confused if TX buffers
9800          * straddle the 4GB address boundary in some cases.
9801          */
9802         tp->dev->hard_start_xmit = tg3_start_xmit;
9803
9804         tp->rx_offset = 2;
9805         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
9806             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
9807                 tp->rx_offset = 0;
9808
9809         /* By default, disable wake-on-lan.  User can change this
9810          * using ETHTOOL_SWOL.
9811          */
9812         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9813
9814         return err;
9815 }
9816
9817 #ifdef CONFIG_SPARC64
9818 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
9819 {
9820         struct net_device *dev = tp->dev;
9821         struct pci_dev *pdev = tp->pdev;
9822         struct pcidev_cookie *pcp = pdev->sysdata;
9823
9824         if (pcp != NULL) {
9825                 int node = pcp->prom_node;
9826
9827                 if (prom_getproplen(node, "local-mac-address") == 6) {
9828                         prom_getproperty(node, "local-mac-address",
9829                                          dev->dev_addr, 6);
9830                         memcpy(dev->perm_addr, dev->dev_addr, 6);
9831                         return 0;
9832                 }
9833         }
9834         return -ENODEV;
9835 }
9836
9837 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
9838 {
9839         struct net_device *dev = tp->dev;
9840
9841         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
9842         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
9843         return 0;
9844 }
9845 #endif
9846
9847 static int __devinit tg3_get_device_address(struct tg3 *tp)
9848 {
9849         struct net_device *dev = tp->dev;
9850         u32 hi, lo, mac_offset;
9851
9852 #ifdef CONFIG_SPARC64
9853         if (!tg3_get_macaddr_sparc(tp))
9854                 return 0;
9855 #endif
9856
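        /* The MAC address normally lives at NVRAM offset 0x7c; dual-MAC
         * parts (5704, 5780 class) keep the second port's copy at offset
         * 0xcc, as indicated by TG3PCI_DUAL_MAC_CTRL.
         */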
9857         mac_offset = 0x7c;
9858         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9859              !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) ||
9860             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9861                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
9862                         mac_offset = 0xcc;
9863                 if (tg3_nvram_lock(tp))
9864                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
9865                 else
9866                         tg3_nvram_unlock(tp);
9867         }
9868
9869         /* First try to get it from MAC address mailbox. */
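        /* A valid mailbox entry is tagged with 0x484b ("HK") in the upper
         * 16 bits of the high word.
         */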
9870         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
9871         if ((hi >> 16) == 0x484b) {
9872                 dev->dev_addr[0] = (hi >>  8) & 0xff;
9873                 dev->dev_addr[1] = (hi >>  0) & 0xff;
9874
9875                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
9876                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9877                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9878                 dev->dev_addr[4] = (lo >>  8) & 0xff;
9879                 dev->dev_addr[5] = (lo >>  0) & 0xff;
9880         }
9881         /* Next, try NVRAM. */
9882         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
9883                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
9884                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
9885                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
9886                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
9887                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
9888                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
9889                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
9890                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
9891         }
9892         /* Finally just fetch it out of the MAC control regs. */
9893         else {
9894                 hi = tr32(MAC_ADDR_0_HIGH);
9895                 lo = tr32(MAC_ADDR_0_LOW);
9896
9897                 dev->dev_addr[5] = lo & 0xff;
9898                 dev->dev_addr[4] = (lo >> 8) & 0xff;
9899                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9900                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9901                 dev->dev_addr[1] = hi & 0xff;
9902                 dev->dev_addr[0] = (hi >> 8) & 0xff;
9903         }
9904
9905         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
9906 #ifdef CONFIG_SPARC64
9907                 if (!tg3_get_default_macaddr_sparc(tp))
9908                         return 0;
9909 #endif
9910                 return -EINVAL;
9911         }
9912         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
9913         return 0;
9914 }
9915
9916 #define BOUNDARY_SINGLE_CACHELINE       1
9917 #define BOUNDARY_MULTI_CACHELINE        2
9918
9919 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
9920 {
9921         int cacheline_size;
9922         u8 byte;
9923         int goal;
9924
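        /* PCI_CACHE_LINE_SIZE is expressed in 32-bit words; a value of
         * zero means it was never set, so fall back to the largest
         * boundary (1024 bytes).
         */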
9925         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
9926         if (byte == 0)
9927                 cacheline_size = 1024;
9928         else
9929                 cacheline_size = (int) byte * 4;
9930
9931         /* On 5703 and later chips, the boundary bits have no
9932          * effect.
9933          */
9934         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9935             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
9936             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9937                 goto out;
9938
9939 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
9940         goal = BOUNDARY_MULTI_CACHELINE;
9941 #else
9942 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
9943         goal = BOUNDARY_SINGLE_CACHELINE;
9944 #else
9945         goal = 0;
9946 #endif
9947 #endif
9948
9949         if (!goal)
9950                 goto out;
9951
9952         /* PCI controllers on most RISC systems tend to disconnect
9953          * when a device tries to burst across a cache-line boundary.
9954          * Therefore, letting tg3 do so just wastes PCI bandwidth.
9955          *
9956          * Unfortunately, for PCI-E there are only limited
9957          * write-side controls for this, and thus for reads
9958          * we will still get the disconnects.  We'll also waste
9959          * these PCI cycles for both read and write for chips
9960          * other than 5700 and 5701 which do not implement the
9961          * boundary bits.
9962          */
9963         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
9964             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
9965                 switch (cacheline_size) {
9966                 case 16:
9967                 case 32:
9968                 case 64:
9969                 case 128:
9970                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9971                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
9972                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
9973                         } else {
9974                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9975                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9976                         }
9977                         break;
9978
9979                 case 256:
9980                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
9981                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
9982                         break;
9983
9984                 default:
9985                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9986                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9987                         break;
9988                 }
9989         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9990                 switch (cacheline_size) {
9991                 case 16:
9992                 case 32:
9993                 case 64:
9994                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9995                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9996                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
9997                                 break;
9998                         }
9999                         /* fallthrough */
10000                 case 128:
10001                 default:
10002                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10003                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10004                         break;
10005                 }
10006         } else {
10007                 switch (cacheline_size) {
10008                 case 16:
10009                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10010                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10011                                         DMA_RWCTRL_WRITE_BNDRY_16);
10012                                 break;
10013                         }
10014                         /* fallthrough */
10015                 case 32:
10016                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10017                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10018                                         DMA_RWCTRL_WRITE_BNDRY_32);
10019                                 break;
10020                         }
10021                         /* fallthrough */
10022                 case 64:
10023                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10024                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10025                                         DMA_RWCTRL_WRITE_BNDRY_64);
10026                                 break;
10027                         }
10028                         /* fallthrough */
10029                 case 128:
10030                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10031                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10032                                         DMA_RWCTRL_WRITE_BNDRY_128);
10033                                 break;
10034                         }
10035                         /* fallthrough */
10036                 case 256:
10037                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10038                                 DMA_RWCTRL_WRITE_BNDRY_256);
10039                         break;
10040                 case 512:
10041                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10042                                 DMA_RWCTRL_WRITE_BNDRY_512);
10043                         break;
10044                 case 1024:
10045                 default:
10046                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10047                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10048                         break;
10049                 }
10050         }
10051
10052 out:
10053         return val;
10054 }
10055
10056 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10057 {
10058         struct tg3_internal_buffer_desc test_desc;
10059         u32 sram_dma_descs;
10060         int i, ret;
10061
10062         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10063
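        /* The test descriptor is built in host memory, copied into the
         * NIC's internal DMA descriptor pool through the PCI memory
         * window, and then kicked off via the FTQ enqueue registers.
         */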
10064         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10065         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10066         tw32(RDMAC_STATUS, 0);
10067         tw32(WDMAC_STATUS, 0);
10068
10069         tw32(BUFMGR_MODE, 0);
10070         tw32(FTQ_RESET, 0);
10071
10072         test_desc.addr_hi = ((u64) buf_dma) >> 32;
10073         test_desc.addr_lo = buf_dma & 0xffffffff;
10074         test_desc.nic_mbuf = 0x00002100;
10075         test_desc.len = size;
10076
10077         /*
10078          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
10079          * the *second* time the tg3 driver was loaded after an
10080          * initial scan.
10081          *
10082          * Broadcom tells me:
10083          *   ...the DMA engine is connected to the GRC block and a DMA
10084          *   reset may affect the GRC block in some unpredictable way...
10085          *   The behavior of resets to individual blocks has not been tested.
10086          *
10087          * Broadcom noted the GRC reset will also reset all sub-components.
10088          */
10089         if (to_device) {
10090                 test_desc.cqid_sqid = (13 << 8) | 2;
10091
10092                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10093                 udelay(40);
10094         } else {
10095                 test_desc.cqid_sqid = (16 << 8) | 7;
10096
10097                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10098                 udelay(40);
10099         }
10100         test_desc.flags = 0x00000005;
10101
10102         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10103                 u32 val;
10104
10105                 val = *(((u32 *)&test_desc) + i);
10106                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10107                                        sram_dma_descs + (i * sizeof(u32)));
10108                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10109         }
10110         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10111
10112         if (to_device) {
10113                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10114         } else {
10115                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10116         }
10117
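        /* Poll the completion FIFO for up to 4ms (40 polls, 100usec apart)
         * waiting for the descriptor address to come back.
         */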
10118         ret = -ENODEV;
10119         for (i = 0; i < 40; i++) {
10120                 u32 val;
10121
10122                 if (to_device)
10123                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10124                 else
10125                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10126                 if ((val & 0xffff) == sram_dma_descs) {
10127                         ret = 0;
10128                         break;
10129                 }
10130
10131                 udelay(100);
10132         }
10133
10134         return ret;
10135 }
10136
10137 #define TEST_BUFFER_SIZE        0x2000
10138
10139 static int __devinit tg3_test_dma(struct tg3 *tp)
10140 {
10141         dma_addr_t buf_dma;
10142         u32 *buf, saved_dma_rwctrl;
10143         int ret;
10144
10145         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10146         if (!buf) {
10147                 ret = -ENOMEM;
10148                 goto out_nofree;
10149         }
10150
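        /* Seed dma_rwctrl with the PCI write/read command codes, then fold
         * in DMA boundary bits derived from the host cache line size and
         * bus-dependent watermark settings.
         */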
10151         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10152                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10153
10154         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
10155
10156         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10157                 /* DMA read watermark not used on PCIE */
10158                 tp->dma_rwctrl |= 0x00180000;
10159         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10160                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10161                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10162                         tp->dma_rwctrl |= 0x003f0000;
10163                 else
10164                         tp->dma_rwctrl |= 0x003f000f;
10165         } else {
10166                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10167                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10168                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10169
10170                         if (ccval == 0x6 || ccval == 0x7)
10171                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10172
10173                         /* Set bit 23 to enable PCIX hw bug fix */
10174                         tp->dma_rwctrl |= 0x009f0000;
10175                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10176                         /* 5780 always in PCIX mode */
10177                         tp->dma_rwctrl |= 0x00144000;
10178                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10179                         /* 5714 always in PCIX mode */
10180                         tp->dma_rwctrl |= 0x00148000;
10181                 } else {
10182                         tp->dma_rwctrl |= 0x001b000f;
10183                 }
10184         }
10185
10186         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10187             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10188                 tp->dma_rwctrl &= 0xfffffff0;
10189
10190         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10191             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10192                 /* Remove this if it causes problems for some boards. */
10193                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10194
10195                 /* On 5700/5701 chips, we need to set this bit.
10196                  * Otherwise the chip will issue cacheline transactions
10197                  * to streamable DMA memory without all of the byte
10198                  * enables turned on.  This is an error on several
10199                  * RISC PCI controllers, in particular sparc64.
10200                  *
10201                  * On 5703/5704 chips, this bit has been reassigned
10202                  * a different meaning.  In particular, it is used
10203                  * on those chips to enable a PCI-X workaround.
10204                  */
10205                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10206         }
10207
10208         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10209
10210 #if 0
10211         /* Unneeded, already done by tg3_get_invariants.  */
10212         tg3_switch_clocks(tp);
10213 #endif
10214
10215         ret = 0;
10216         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10217             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10218                 goto out;
10219
10220         /* It is best to perform the DMA test with the maximum write burst
10221          * size to expose the 5700/5701 write DMA bug.
10222          */
10223         saved_dma_rwctrl = tp->dma_rwctrl;
10224         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10225         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10226
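        /* Fill the buffer with a known pattern, DMA it to the chip and
         * back, and verify it.  On corruption, retry with the write
         * boundary forced to 16 bytes before giving up.
         */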
10227         while (1) {
10228                 u32 *p = buf, i;
10229
10230                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10231                         p[i] = i;
10232
10233                 /* Send the buffer to the chip. */
10234                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10235                 if (ret) {
10236                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
10237                         break;
10238                 }
10239
10240 #if 0
10241                 /* validate data reached card RAM correctly. */
10242                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10243                         u32 val;
10244                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10245                         if (le32_to_cpu(val) != p[i]) {
10246                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
10247                                 /* ret = -ENODEV here? */
10248                         }
10249                         p[i] = 0;
10250                 }
10251 #endif
10252                 /* Now read it back. */
10253                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10254                 if (ret) {
10255                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
10256
10257                         break;
10258                 }
10259
10260                 /* Verify it. */
10261                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10262                         if (p[i] == i)
10263                                 continue;
10264
10265                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10266                             DMA_RWCTRL_WRITE_BNDRY_16) {
10267                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10268                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10269                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10270                                 break;
10271                         } else {
10272                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10273                                 ret = -ENODEV;
10274                                 goto out;
10275                         }
10276                 }
10277
10278                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10279                         /* Success. */
10280                         ret = 0;
10281                         break;
10282                 }
10283         }
10284         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10285             DMA_RWCTRL_WRITE_BNDRY_16) {
10286                 static struct pci_device_id dma_wait_state_chipsets[] = {
10287                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10288                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10289                         { },
10290                 };
10291
10292                 /* DMA test passed without adjusting the DMA boundary;
10293                  * now look for chipsets that are known to expose the
10294                  * DMA bug without failing the test.
10295                  */
10296                 if (pci_dev_present(dma_wait_state_chipsets)) {
10297                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10298                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10299                 }
10300                 else
10301                         /* Safe to use the calculated DMA boundary. */
10302                         tp->dma_rwctrl = saved_dma_rwctrl;
10303
10304                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10305         }
10306
10307 out:
10308         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
10309 out_nofree:
10310         return ret;
10311 }
10312
10313 static void __devinit tg3_init_link_config(struct tg3 *tp)
10314 {
10315         tp->link_config.advertising =
10316                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10317                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10318                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
10319                  ADVERTISED_Autoneg | ADVERTISED_MII);
10320         tp->link_config.speed = SPEED_INVALID;
10321         tp->link_config.duplex = DUPLEX_INVALID;
10322         tp->link_config.autoneg = AUTONEG_ENABLE;
10323         netif_carrier_off(tp->dev);
10324         tp->link_config.active_speed = SPEED_INVALID;
10325         tp->link_config.active_duplex = DUPLEX_INVALID;
10326         tp->link_config.phy_is_low_power = 0;
10327         tp->link_config.orig_speed = SPEED_INVALID;
10328         tp->link_config.orig_duplex = DUPLEX_INVALID;
10329         tp->link_config.orig_autoneg = AUTONEG_INVALID;
10330 }
10331
10332 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
10333 {
10334         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10335                 tp->bufmgr_config.mbuf_read_dma_low_water =
10336                         DEFAULT_MB_RDMA_LOW_WATER_5705;
10337                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10338                         DEFAULT_MB_MACRX_LOW_WATER_5705;
10339                 tp->bufmgr_config.mbuf_high_water =
10340                         DEFAULT_MB_HIGH_WATER_5705;
10341
10342                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10343                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
10344                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10345                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
10346                 tp->bufmgr_config.mbuf_high_water_jumbo =
10347                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
10348         } else {
10349                 tp->bufmgr_config.mbuf_read_dma_low_water =
10350                         DEFAULT_MB_RDMA_LOW_WATER;
10351                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10352                         DEFAULT_MB_MACRX_LOW_WATER;
10353                 tp->bufmgr_config.mbuf_high_water =
10354                         DEFAULT_MB_HIGH_WATER;
10355
10356                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10357                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
10358                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10359                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
10360                 tp->bufmgr_config.mbuf_high_water_jumbo =
10361                         DEFAULT_MB_HIGH_WATER_JUMBO;
10362         }
10363
10364         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
10365         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
10366 }
10367
10368 static char * __devinit tg3_phy_string(struct tg3 *tp)
10369 {
10370         switch (tp->phy_id & PHY_ID_MASK) {
10371         case PHY_ID_BCM5400:    return "5400";
10372         case PHY_ID_BCM5401:    return "5401";
10373         case PHY_ID_BCM5411:    return "5411";
10374         case PHY_ID_BCM5701:    return "5701";
10375         case PHY_ID_BCM5703:    return "5703";
10376         case PHY_ID_BCM5704:    return "5704";
10377         case PHY_ID_BCM5705:    return "5705";
10378         case PHY_ID_BCM5750:    return "5750";
10379         case PHY_ID_BCM5752:    return "5752";
10380         case PHY_ID_BCM5714:    return "5714";
10381         case PHY_ID_BCM5780:    return "5780";
10382         case PHY_ID_BCM8002:    return "8002/serdes";
10383         case 0:                 return "serdes";
10384         default:                return "unknown";
10385         }
10386 }
10387
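/* Build a human-readable description of the bus the chip sits on
 * (PCI Express, PCI-X or plain PCI, plus clock speed and bus width)
 * for the probe-time banner.  For PCI-X the clock speed is decoded
 * from the low bits of TG3PCI_CLOCK_CTRL.
 */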
10388 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
10389 {
10390         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10391                 strcpy(str, "PCI Express");
10392                 return str;
10393         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
10394                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
10395
10396                 strcpy(str, "PCIX:");
10397
10398                 if ((clock_ctrl == 7) ||
10399                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
10400                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
10401                         strcat(str, "133MHz");
10402                 else if (clock_ctrl == 0)
10403                         strcat(str, "33MHz");
10404                 else if (clock_ctrl == 2)
10405                         strcat(str, "50MHz");
10406                 else if (clock_ctrl == 4)
10407                         strcat(str, "66MHz");
10408                 else if (clock_ctrl == 6)
10409                         strcat(str, "100MHz");
10410                 else if (clock_ctrl == 7)
10411                         strcat(str, "133MHz");
10412         } else {
10413                 strcpy(str, "PCI:");
10414                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
10415                         strcat(str, "66MHz");
10416                 else
10417                         strcat(str, "33MHz");
10418         }
10419         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
10420                 strcat(str, ":32-bit");
10421         else
10422                 strcat(str, ":64-bit");
10423         return str;
10424 }
10425
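/* The 5704 is a dual-port chip whose two MACs appear as two PCI
 * functions of the same device.  Masking the function bits out of
 * devfn (devfn & ~7) and scanning all eight functions of that slot
 * locates the sibling port.
 */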
10426 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
10427 {
10428         struct pci_dev *peer;
10429         unsigned int func, devnr = tp->pdev->devfn & ~7;
10430
10431         for (func = 0; func < 8; func++) {
10432                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
10433                 if (peer && peer != tp->pdev)
10434                         break;
10435                 pci_dev_put(peer);
10436         }
10437         if (!peer || peer == tp->pdev)
10438                 BUG();
10439
10440         /*
10441          * We don't need to keep the refcount elevated; there's no way
10442          * to remove one half of this device without removing the other.
10443          */
10444         pci_dev_put(peer);
10445
10446         return peer;
10447 }
10448
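/* Fill tp->coal with the default interrupt coalescing parameters
 * reported by ETHTOOL_GCOALESCE.  The CLRTCKS variants are used when
 * the host coalescing engine clears the tick counters on BD events,
 * and the IRQ/statistics coalescing values are zeroed on 5705 and
 * later chips.
 */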
10449 static void __devinit tg3_init_coal(struct tg3 *tp)
10450 {
10451         struct ethtool_coalesce *ec = &tp->coal;
10452
10453         memset(ec, 0, sizeof(*ec));
10454         ec->cmd = ETHTOOL_GCOALESCE;
10455         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
10456         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
10457         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
10458         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
10459         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
10460         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
10461         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
10462         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
10463         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
10464
10465         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
10466                                  HOSTCC_MODE_CLRTICK_TXBD)) {
10467                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
10468                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
10469                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
10470                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
10471         }
10472
10473         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10474                 ec->rx_coalesce_usecs_irq = 0;
10475                 ec->tx_coalesce_usecs_irq = 0;
10476                 ec->stats_block_coalesce_usecs = 0;
10477         }
10478 }
10479
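/* PCI probe entry point.  Enables the device, claims its MMIO BAR,
 * picks a 64-bit or 32-bit DMA mask, allocates and wires up the
 * net_device, maps the registers, reads the chip invariants and MAC
 * address, runs the DMA engine test and finally registers the netdev.
 */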
10480 static int __devinit tg3_init_one(struct pci_dev *pdev,
10481                                   const struct pci_device_id *ent)
10482 {
10483         static int tg3_version_printed = 0;
10484         unsigned long tg3reg_base, tg3reg_len;
10485         struct net_device *dev;
10486         struct tg3 *tp;
10487         int i, err, pci_using_dac, pm_cap;
10488         char str[40];
10489
10490         if (tg3_version_printed++ == 0)
10491                 printk(KERN_INFO "%s", version);
10492
10493         err = pci_enable_device(pdev);
10494         if (err) {
10495                 printk(KERN_ERR PFX "Cannot enable PCI device, "
10496                        "aborting.\n");
10497                 return err;
10498         }
10499
10500         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10501                 printk(KERN_ERR PFX "Cannot find proper PCI device "
10502                        "base address, aborting.\n");
10503                 err = -ENODEV;
10504                 goto err_out_disable_pdev;
10505         }
10506
10507         err = pci_request_regions(pdev, DRV_MODULE_NAME);
10508         if (err) {
10509                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
10510                        "aborting.\n");
10511                 goto err_out_disable_pdev;
10512         }
10513
10514         pci_set_master(pdev);
10515
10516         /* Find power-management capability. */
10517         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10518         if (pm_cap == 0) {
10519                 printk(KERN_ERR PFX "Cannot find Power Management capability, "
10520                        "aborting.\n");
10521                 err = -EIO;
10522                 goto err_out_free_res;
10523         }
10524
10525         /* Configure DMA attributes. */
10526         err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
10527         if (!err) {
10528                 pci_using_dac = 1;
10529                 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
10530                 if (err < 0) {
10531                         printk(KERN_ERR PFX "Unable to obtain 64-bit DMA "
10532                                "for consistent allocations\n");
10533                         goto err_out_free_res;
10534                 }
10535         } else {
10536                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
10537                 if (err) {
10538                         printk(KERN_ERR PFX "No usable DMA configuration, "
10539                                "aborting.\n");
10540                         goto err_out_free_res;
10541                 }
10542                 pci_using_dac = 0;
10543         }
10544
10545         tg3reg_base = pci_resource_start(pdev, 0);
10546         tg3reg_len = pci_resource_len(pdev, 0);
10547
10548         dev = alloc_etherdev(sizeof(*tp));
10549         if (!dev) {
10550                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
10551                 err = -ENOMEM;
10552                 goto err_out_free_res;
10553         }
10554
10555         SET_MODULE_OWNER(dev);
10556         SET_NETDEV_DEV(dev, &pdev->dev);
10557
10558         if (pci_using_dac)
10559                 dev->features |= NETIF_F_HIGHDMA;
10560         dev->features |= NETIF_F_LLTX;
10561 #if TG3_VLAN_TAG_USED
10562         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
10563         dev->vlan_rx_register = tg3_vlan_rx_register;
10564         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
10565 #endif
10566
10567         tp = netdev_priv(dev);
10568         tp->pdev = pdev;
10569         tp->dev = dev;
10570         tp->pm_cap = pm_cap;
10571         tp->mac_mode = TG3_DEF_MAC_MODE;
10572         tp->rx_mode = TG3_DEF_RX_MODE;
10573         tp->tx_mode = TG3_DEF_TX_MODE;
10574         tp->mi_mode = MAC_MI_MODE_BASE;
10575         if (tg3_debug > 0)
10576                 tp->msg_enable = tg3_debug;
10577         else
10578                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
10579
10580         /* The word/byte swap controls here control register access byte
10581          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
10582          * setting below.
10583          */
10584         tp->misc_host_ctrl =
10585                 MISC_HOST_CTRL_MASK_PCI_INT |
10586                 MISC_HOST_CTRL_WORD_SWAP |
10587                 MISC_HOST_CTRL_INDIR_ACCESS |
10588                 MISC_HOST_CTRL_PCISTATE_RW;
10589
10590         /* The NONFRM (non-frame) byte/word swap controls take effect
10591          * on descriptor entries, i.e. anything which isn't packet data.
10592          *
10593          * The StrongARM chips on the board (one for tx, one for rx)
10594          * are running in big-endian mode.
10595          */
10596         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
10597                         GRC_MODE_WSWAP_NONFRM_DATA);
10598 #ifdef __BIG_ENDIAN
10599         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
10600 #endif
10601         spin_lock_init(&tp->lock);
10602         spin_lock_init(&tp->tx_lock);
10603         spin_lock_init(&tp->indirect_lock);
10604         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
10605
10606         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
10607         if (!tp->regs) {
10608                 printk(KERN_ERR PFX "Cannot map device registers, "
10609                        "aborting.\n");
10610                 err = -ENOMEM;
10611                 goto err_out_free_dev;
10612         }
10613
10614         tg3_init_link_config(tp);
10615
10616         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
10617         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
10618         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
10619
10620         dev->open = tg3_open;
10621         dev->stop = tg3_close;
10622         dev->get_stats = tg3_get_stats;
10623         dev->set_multicast_list = tg3_set_rx_mode;
10624         dev->set_mac_address = tg3_set_mac_addr;
10625         dev->do_ioctl = tg3_ioctl;
10626         dev->tx_timeout = tg3_tx_timeout;
10627         dev->poll = tg3_poll;
10628         dev->ethtool_ops = &tg3_ethtool_ops;
10629         dev->weight = 64;
10630         dev->watchdog_timeo = TG3_TX_TIMEOUT;
10631         dev->change_mtu = tg3_change_mtu;
10632         dev->irq = pdev->irq;
10633 #ifdef CONFIG_NET_POLL_CONTROLLER
10634         dev->poll_controller = tg3_poll_controller;
10635 #endif
10636
10637         err = tg3_get_invariants(tp);
10638         if (err) {
10639                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
10640                        "aborting.\n");
10641                 goto err_out_iounmap;
10642         }
10643
10644         tg3_init_bufmgr_config(tp);
10645
10646 #if TG3_TSO_SUPPORT != 0
10647         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
10648                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10649         }
10650         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10651             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10652             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
10653             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
10654                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
10655         } else {
10656                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10657         }
10658
10659         /* TSO is off by default; the user can enable it via ethtool. */
10660 #if 0
10661         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
10662                 dev->features |= NETIF_F_TSO;
10663 #endif
10664
10665 #endif
10666
10667         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
10668             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
10669             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
10670                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
10671                 tp->rx_pending = 63;
10672         }
10673
10674         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10675                 tp->pdev_peer = tg3_find_5704_peer(tp);
10676
10677         err = tg3_get_device_address(tp);
10678         if (err) {
10679                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
10680                        "aborting.\n");
10681                 goto err_out_iounmap;
10682         }
10683
10684         /*
10685          * Reset the chip in case the UNDI or EFI driver did not shut it
10686          * down properly.  Otherwise the DMA self test will enable WDMAC
10687          * and we'll see (spurious) pending DMA on the PCI bus at that point.
10688          */
10689         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
10690             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10691                 pci_save_state(tp->pdev);
10692                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
10693                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10694         }
10695
10696         err = tg3_test_dma(tp);
10697         if (err) {
10698                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
10699                 goto err_out_iounmap;
10700         }
10701
10702         /* Tigon3 can do IPv4 checksum offload only... and some chips
10703          * have buggy checksumming.
10704          */
10705         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
10706                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
10707                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10708         } else
10709                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10710
10711         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
10712                 dev->features &= ~NETIF_F_HIGHDMA;
10713
10714         /* Flow control autonegotiation is the default behavior. */
10715         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10716
10717         tg3_init_coal(tp);
10718
10719         /* Now that we have fully set up the chip, save away a snapshot
10720          * of the PCI config space.  We need to restore this after
10721          * GRC_MISC_CFG core clock resets and some resume events.
10722          */
10723         pci_save_state(tp->pdev);
10724
10725         err = register_netdev(dev);
10726         if (err) {
10727                 printk(KERN_ERR PFX "Cannot register net device, "
10728                        "aborting.\n");
10729                 goto err_out_iounmap;
10730         }
10731
10732         pci_set_drvdata(pdev, dev);
10733
10734         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
10735                dev->name,
10736                tp->board_part_number,
10737                tp->pci_chip_rev_id,
10738                tg3_phy_string(tp),
10739                tg3_bus_string(tp, str),
10740                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
10741
10742         for (i = 0; i < 6; i++)
10743                 printk("%2.2x%c", dev->dev_addr[i],
10744                        i == 5 ? '\n' : ':');
10745
10746         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
10747                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
10748                "TSOcap[%d]\n",
10749                dev->name,
10750                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
10751                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
10752                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
10753                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
10754                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
10755                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
10756                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
10757         printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
10758                dev->name, tp->dma_rwctrl);
10759
10760         return 0;
10761
10762 err_out_iounmap:
10763         if (tp->regs) {
10764                 iounmap(tp->regs);
10765                 tp->regs = NULL;
10766         }
10767
10768 err_out_free_dev:
10769         free_netdev(dev);
10770
10771 err_out_free_res:
10772         pci_release_regions(pdev);
10773
10774 err_out_disable_pdev:
10775         pci_disable_device(pdev);
10776         pci_set_drvdata(pdev, NULL);
10777         return err;
10778 }
10779
10780 static void __devexit tg3_remove_one(struct pci_dev *pdev)
10781 {
10782         struct net_device *dev = pci_get_drvdata(pdev);
10783
10784         if (dev) {
10785                 struct tg3 *tp = netdev_priv(dev);
10786
10787                 unregister_netdev(dev);
10788                 if (tp->regs) {
10789                         iounmap(tp->regs);
10790                         tp->regs = NULL;
10791                 }
10792                 free_netdev(dev);
10793                 pci_release_regions(pdev);
10794                 pci_disable_device(pdev);
10795                 pci_set_drvdata(pdev, NULL);
10796         }
10797 }
10798
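/* Power management: suspend quiesces the interface (stops the queue
 * and polling, kills the timer, disables interrupts), detaches it,
 * halts the chip and programs the requested low-power state; resume
 * restores PCI config space, powers the chip back up and re-runs the
 * normal init path.
 */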
10799 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
10800 {
10801         struct net_device *dev = pci_get_drvdata(pdev);
10802         struct tg3 *tp = netdev_priv(dev);
10803         int err;
10804
10805         if (!netif_running(dev))
10806                 return 0;
10807
10808         tg3_netif_stop(tp);
10809
10810         del_timer_sync(&tp->timer);
10811
10812         tg3_full_lock(tp, 1);
10813         tg3_disable_ints(tp);
10814         tg3_full_unlock(tp);
10815
10816         netif_device_detach(dev);
10817
10818         tg3_full_lock(tp, 0);
10819         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10820         tg3_full_unlock(tp);
10821
10822         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
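        /* If entering the low-power state failed, bring the hardware
         * back up and reattach the interface so the device remains
         * usable; the error is still returned to the PM core.
         */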
10823         if (err) {
10824                 tg3_full_lock(tp, 0);
10825
10826                 tg3_init_hw(tp);
10827
10828                 tp->timer.expires = jiffies + tp->timer_offset;
10829                 add_timer(&tp->timer);
10830
10831                 netif_device_attach(dev);
10832                 tg3_netif_start(tp);
10833
10834                 tg3_full_unlock(tp);
10835         }
10836
10837         return err;
10838 }
10839
10840 static int tg3_resume(struct pci_dev *pdev)
10841 {
10842         struct net_device *dev = pci_get_drvdata(pdev);
10843         struct tg3 *tp = netdev_priv(dev);
10844         int err;
10845
10846         if (!netif_running(dev))
10847                 return 0;
10848
10849         pci_restore_state(tp->pdev);
10850
10851         err = tg3_set_power_state(tp, 0);
10852         if (err)
10853                 return err;
10854
10855         netif_device_attach(dev);
10856
10857         tg3_full_lock(tp, 0);
10858
10859         tg3_init_hw(tp);
10860
10861         tp->timer.expires = jiffies + tp->timer_offset;
10862         add_timer(&tp->timer);
10863
10864         tg3_netif_start(tp);
10865
10866         tg3_full_unlock(tp);
10867
10868         return 0;
10869 }
10870
10871 static struct pci_driver tg3_driver = {
10872         .name           = DRV_MODULE_NAME,
10873         .id_table       = tg3_pci_tbl,
10874         .probe          = tg3_init_one,
10875         .remove         = __devexit_p(tg3_remove_one),
10876         .suspend        = tg3_suspend,
10877         .resume         = tg3_resume
10878 };
10879
10880 static int __init tg3_init(void)
10881 {
10882         return pci_module_init(&tg3_driver);
10883 }
10884
10885 static void __exit tg3_cleanup(void)
10886 {
10887         pci_unregister_driver(&tg3_driver);
10888 }
10889
10890 module_init(tg3_init);
10891 module_exit(tg3_cleanup);