1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39
40 #include <net/checksum.h>
41
42 #include <asm/system.h>
43 #include <asm/io.h>
44 #include <asm/byteorder.h>
45 #include <asm/uaccess.h>
46
47 #ifdef CONFIG_SPARC64
48 #include <asm/idprom.h>
49 #include <asm/oplib.h>
50 #include <asm/pbm.h>
51 #endif
52
53 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
54 #define TG3_VLAN_TAG_USED 1
55 #else
56 #define TG3_VLAN_TAG_USED 0
57 #endif
58
59 #ifdef NETIF_F_TSO
60 #define TG3_TSO_SUPPORT 1
61 #else
62 #define TG3_TSO_SUPPORT 0
63 #endif
64
65 #include "tg3.h"
66
67 #define DRV_MODULE_NAME         "tg3"
68 #define PFX DRV_MODULE_NAME     ": "
69 #define DRV_MODULE_VERSION      "3.35"
70 #define DRV_MODULE_RELDATE      "August 6, 2005"
71
72 #define TG3_DEF_MAC_MODE        0
73 #define TG3_DEF_RX_MODE         0
74 #define TG3_DEF_TX_MODE         0
75 #define TG3_DEF_MSG_ENABLE        \
76         (NETIF_MSG_DRV          | \
77          NETIF_MSG_PROBE        | \
78          NETIF_MSG_LINK         | \
79          NETIF_MSG_TIMER        | \
80          NETIF_MSG_IFDOWN       | \
81          NETIF_MSG_IFUP         | \
82          NETIF_MSG_RX_ERR       | \
83          NETIF_MSG_TX_ERR)
84
85 /* length of time before we decide the hardware is borked,
86  * and dev->tx_timeout() should be called to fix the problem
87  */
88 #define TG3_TX_TIMEOUT                  (5 * HZ)
89
90 /* hardware minimum and maximum for a single frame's data payload */
91 #define TG3_MIN_MTU                     60
92 #define TG3_MAX_MTU(tp) \
93         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
94
95 /* These numbers seem to be hard coded in the NIC firmware somehow.
96  * You can't change the ring sizes, but you can change where you place
97  * them in the NIC onboard memory.
98  */
99 #define TG3_RX_RING_SIZE                512
100 #define TG3_DEF_RX_RING_PENDING         200
101 #define TG3_RX_JUMBO_RING_SIZE          256
102 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
103
104 /* Do not place this n-ring entries value into the tp struct itself,
105  * we really want to expose these constants to GCC so that modulo et
106  * al.  operations are done with shifts and masks instead of with
107  * hw multiply/modulo instructions.  Another solution would be to
108  * replace things like '% foo' with '& (foo - 1)'.
109  */
110 #define TG3_RX_RCB_RING_SIZE(tp)        \
111         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
112
113 #define TG3_TX_RING_SIZE                512
114 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
115
116 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_RING_SIZE)
118 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_JUMBO_RING_SIZE)
120 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
121                                    TG3_RX_RCB_RING_SIZE(tp))
122 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
123                                  TG3_TX_RING_SIZE)
124 #define TX_RING_GAP(TP) \
125         (TG3_TX_RING_SIZE - (TP)->tx_pending)
126 #define TX_BUFFS_AVAIL(TP)                                              \
127         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
128           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
129           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
130 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
131
132 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
134
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
137
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
141 #define TG3_NUM_TEST            6
142
143 static char version[] __devinitdata =
144         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION);
150
151 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154
155 static struct pci_device_id tg3_pci_tbl[] = {
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
241           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
243           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244         { 0, }
245 };
246
247 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
248
249 static struct {
250         const char string[ETH_GSTRING_LEN];
251 } ethtool_stats_keys[TG3_NUM_STATS] = {
252         { "rx_octets" },
253         { "rx_fragments" },
254         { "rx_ucast_packets" },
255         { "rx_mcast_packets" },
256         { "rx_bcast_packets" },
257         { "rx_fcs_errors" },
258         { "rx_align_errors" },
259         { "rx_xon_pause_rcvd" },
260         { "rx_xoff_pause_rcvd" },
261         { "rx_mac_ctrl_rcvd" },
262         { "rx_xoff_entered" },
263         { "rx_frame_too_long_errors" },
264         { "rx_jabbers" },
265         { "rx_undersize_packets" },
266         { "rx_in_length_errors" },
267         { "rx_out_length_errors" },
268         { "rx_64_or_less_octet_packets" },
269         { "rx_65_to_127_octet_packets" },
270         { "rx_128_to_255_octet_packets" },
271         { "rx_256_to_511_octet_packets" },
272         { "rx_512_to_1023_octet_packets" },
273         { "rx_1024_to_1522_octet_packets" },
274         { "rx_1523_to_2047_octet_packets" },
275         { "rx_2048_to_4095_octet_packets" },
276         { "rx_4096_to_8191_octet_packets" },
277         { "rx_8192_to_9022_octet_packets" },
278
279         { "tx_octets" },
280         { "tx_collisions" },
281
282         { "tx_xon_sent" },
283         { "tx_xoff_sent" },
284         { "tx_flow_control" },
285         { "tx_mac_errors" },
286         { "tx_single_collisions" },
287         { "tx_mult_collisions" },
288         { "tx_deferred" },
289         { "tx_excessive_collisions" },
290         { "tx_late_collisions" },
291         { "tx_collide_2times" },
292         { "tx_collide_3times" },
293         { "tx_collide_4times" },
294         { "tx_collide_5times" },
295         { "tx_collide_6times" },
296         { "tx_collide_7times" },
297         { "tx_collide_8times" },
298         { "tx_collide_9times" },
299         { "tx_collide_10times" },
300         { "tx_collide_11times" },
301         { "tx_collide_12times" },
302         { "tx_collide_13times" },
303         { "tx_collide_14times" },
304         { "tx_collide_15times" },
305         { "tx_ucast_packets" },
306         { "tx_mcast_packets" },
307         { "tx_bcast_packets" },
308         { "tx_carrier_sense_errors" },
309         { "tx_discards" },
310         { "tx_errors" },
311
312         { "dma_writeq_full" },
313         { "dma_write_prioq_full" },
314         { "rxbds_empty" },
315         { "rx_discards" },
316         { "rx_errors" },
317         { "rx_threshold_hit" },
318
319         { "dma_readq_full" },
320         { "dma_read_prioq_full" },
321         { "tx_comp_queue_full" },
322
323         { "ring_set_send_prod_index" },
324         { "ring_status_update" },
325         { "nic_irqs" },
326         { "nic_avoided_irqs" },
327         { "nic_tx_threshold_hit" }
328 };
329
330 static struct {
331         const char string[ETH_GSTRING_LEN];
332 } ethtool_test_keys[TG3_NUM_TEST] = {
333         { "nvram test     (online) " },
334         { "link test      (online) " },
335         { "register test  (offline)" },
336         { "memory test    (offline)" },
337         { "loopback test  (offline)" },
338         { "interrupt test (offline)" },
339 };
340
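/* Register write helper behind the tw32() macro below.  Chips with the
 * PCI-X target hardware bug are written through the register window in
 * PCI config space (serialized by indirect_lock); all other chips use a
 * plain MMIO write, with a flushing read only on parts that have the
 * 5701 register write bug.
 */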
341 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
342 {
343         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
344                 spin_lock_bh(&tp->indirect_lock);
345                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
346                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
347                 spin_unlock_bh(&tp->indirect_lock);
348         } else {
349                 writel(val, tp->regs + off);
350                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
351                         readl(tp->regs + off);
352         }
353 }
354
355 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
356 {
357         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
358                 spin_lock_bh(&tp->indirect_lock);
359                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
360                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
361                 spin_unlock_bh(&tp->indirect_lock);
362         } else {
363                 void __iomem *dest = tp->regs + off;
364                 writel(val, dest);
365                 readl(dest);    /* always flush PCI write */
366         }
367 }
368
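/* Mailbox write helpers.  The TX variant repeats the write on chips with
 * the TXD mailbox hardware bug, and both read the mailbox back when the
 * host bridge may reorder posted writes, so the doorbell is guaranteed
 * to have reached the chip before we continue.
 */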
369 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
370 {
371         void __iomem *mbox = tp->regs + off;
372         writel(val, mbox);
373         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
374                 readl(mbox);
375 }
376
377 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
378 {
379         void __iomem *mbox = tp->regs + off;
380         writel(val, mbox);
381         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
382                 writel(val, mbox);
383         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
384                 readl(mbox);
385 }
386
387 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
388 #define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
389 #define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
390
391 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
392 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
393 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
394 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
395 #define tr32(reg)               readl(tp->regs + (reg))
396 #define tr16(reg)               readw(tp->regs + (reg))
397 #define tr8(reg)                readb(tp->regs + (reg))
398
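/* Access the NIC's on-board SRAM through the memory window in PCI config
 * space, serialized by indirect_lock.  The window base is restored to
 * zero afterwards so the window is always left in a known state.
 */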
399 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
400 {
401         spin_lock_bh(&tp->indirect_lock);
402         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
403         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
404
405         /* Always leave this as zero. */
406         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
407         spin_unlock_bh(&tp->indirect_lock);
408 }
409
410 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
411 {
412         spin_lock_bh(&tp->indirect_lock);
413         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
414         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
415
416         /* Always leave this as zero. */
417         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
418         spin_unlock_bh(&tp->indirect_lock);
419 }
420
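/* Interrupts are masked by setting MASK_PCI_INT in MISC_HOST_CTRL and
 * writing a non-zero value to interrupt mailbox 0; the mailbox read
 * flushes the write.  tg3_enable_ints() below reverses both steps and
 * kicks the chip to raise an interrupt if a status update arrived while
 * interrupts were off.
 */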
421 static void tg3_disable_ints(struct tg3 *tp)
422 {
423         tw32(TG3PCI_MISC_HOST_CTRL,
424              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
425         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
426         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
427 }
428
429 static inline void tg3_cond_int(struct tg3 *tp)
430 {
431         if (tp->hw_status->status & SD_STATUS_UPDATED)
432                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
433 }
434
435 static void tg3_enable_ints(struct tg3 *tp)
436 {
437         tp->irq_sync = 0;
438         wmb();
439
440         tw32(TG3PCI_MISC_HOST_CTRL,
441              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
442         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
443                      (tp->last_tag << 24));
444         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
445         tg3_cond_int(tp);
446 }
447
448 static inline unsigned int tg3_has_work(struct tg3 *tp)
449 {
450         struct tg3_hw_status *sblk = tp->hw_status;
451         unsigned int work_exists = 0;
452
453         /* check for phy events */
454         if (!(tp->tg3_flags &
455               (TG3_FLAG_USE_LINKCHG_REG |
456                TG3_FLAG_POLL_SERDES))) {
457                 if (sblk->status & SD_STATUS_LINK_CHG)
458                         work_exists = 1;
459         }
460         /* check for RX/TX work to do */
461         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
462             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
463                 work_exists = 1;
464
465         return work_exists;
466 }
467
468 /* tg3_restart_ints
469  *  similar to tg3_enable_ints, but it accurately determines whether there
470  *  is new work pending and can return without flushing the PIO write
471  *  which re-enables interrupts.
472  */
473 static void tg3_restart_ints(struct tg3 *tp)
474 {
475         tw32(TG3PCI_MISC_HOST_CTRL,
476                 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
477         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
478                      tp->last_tag << 24);
479         mmiowb();
480
481         /* When doing tagged status, this work check is unnecessary.
482          * The last_tag we write above tells the chip which piece of
483          * work we've completed.
484          */
485         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
486             tg3_has_work(tp))
487                 tw32(HOSTCC_MODE, tp->coalesce_mode |
488                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
489 }
490
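/* Quiesce and restart the stack-facing side of the driver around
 * hardware reconfiguration: stop/start the TX queue and NAPI polling,
 * bumping trans_start so the TX watchdog does not fire while we are
 * down.
 */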
491 static inline void tg3_netif_stop(struct tg3 *tp)
492 {
493         tp->dev->trans_start = jiffies; /* prevent tx timeout */
494         netif_poll_disable(tp->dev);
495         netif_tx_disable(tp->dev);
496 }
497
498 static inline void tg3_netif_start(struct tg3 *tp)
499 {
500         netif_wake_queue(tp->dev);
501         /* NOTE: unconditional netif_wake_queue is only appropriate
502          * so long as all callers are assured to have free tx slots
503          * (such as after tg3_init_hw)
504          */
505         netif_poll_enable(tp->dev);
506         tp->hw_status->status |= SD_STATUS_UPDATED;
507         tg3_enable_ints(tp);
508 }
509
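/* Switch the chip's core clock selection, stepping through the
 * intermediate ALTCLK settings the hardware requires between states.
 * The 5780 manages its own clocks and is left untouched.
 */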
510 static void tg3_switch_clocks(struct tg3 *tp)
511 {
512         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
513         u32 orig_clock_ctrl;
514
515         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
516                 return;
517
518         orig_clock_ctrl = clock_ctrl;
519         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
520                        CLOCK_CTRL_CLKRUN_OENABLE |
521                        0x1f);
522         tp->pci_clock_ctrl = clock_ctrl;
523
524         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
525                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
526                         tw32_f(TG3PCI_CLOCK_CTRL,
527                                clock_ctrl | CLOCK_CTRL_625_CORE);
528                         udelay(40);
529                 }
530         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
531                 tw32_f(TG3PCI_CLOCK_CTRL,
532                      clock_ctrl |
533                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
534                 udelay(40);
535                 tw32_f(TG3PCI_CLOCK_CTRL,
536                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
537                 udelay(40);
538         }
539         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
540         udelay(40);
541 }
542
543 #define PHY_BUSY_LOOPS  5000
544
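/* MII management access.  A read or write frame is built in MAC_MI_COM
 * and the BUSY bit is polled (up to PHY_BUSY_LOOPS iterations) until the
 * serial transaction completes.  Auto-polling is paused for the duration
 * and restored afterwards.
 */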
545 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
546 {
547         u32 frame_val;
548         unsigned int loops;
549         int ret;
550
551         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
552                 tw32_f(MAC_MI_MODE,
553                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
554                 udelay(80);
555         }
556
557         *val = 0x0;
558
559         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
560                       MI_COM_PHY_ADDR_MASK);
561         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
562                       MI_COM_REG_ADDR_MASK);
563         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
564
565         tw32_f(MAC_MI_COM, frame_val);
566
567         loops = PHY_BUSY_LOOPS;
568         while (loops != 0) {
569                 udelay(10);
570                 frame_val = tr32(MAC_MI_COM);
571
572                 if ((frame_val & MI_COM_BUSY) == 0) {
573                         udelay(5);
574                         frame_val = tr32(MAC_MI_COM);
575                         break;
576                 }
577                 loops -= 1;
578         }
579
580         ret = -EBUSY;
581         if (loops != 0) {
582                 *val = frame_val & MI_COM_DATA_MASK;
583                 ret = 0;
584         }
585
586         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
587                 tw32_f(MAC_MI_MODE, tp->mi_mode);
588                 udelay(80);
589         }
590
591         return ret;
592 }
593
594 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
595 {
596         u32 frame_val;
597         unsigned int loops;
598         int ret;
599
600         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
601                 tw32_f(MAC_MI_MODE,
602                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
603                 udelay(80);
604         }
605
606         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
607                       MI_COM_PHY_ADDR_MASK);
608         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
609                       MI_COM_REG_ADDR_MASK);
610         frame_val |= (val & MI_COM_DATA_MASK);
611         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
612
613         tw32_f(MAC_MI_COM, frame_val);
614
615         loops = PHY_BUSY_LOOPS;
616         while (loops != 0) {
617                 udelay(10);
618                 frame_val = tr32(MAC_MI_COM);
619                 if ((frame_val & MI_COM_BUSY) == 0) {
620                         udelay(5);
621                         frame_val = tr32(MAC_MI_COM);
622                         break;
623                 }
624                 loops -= 1;
625         }
626
627         ret = -EBUSY;
628         if (loops != 0)
629                 ret = 0;
630
631         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
632                 tw32_f(MAC_MI_MODE, tp->mi_mode);
633                 udelay(80);
634         }
635
636         return ret;
637 }
638
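/* Enable the PHY's Ethernet@WireSpeed feature through the auxiliary
 * control shadow register, unless the board has it disabled
 * (TG3_FLG2_NO_ETH_WIRE_SPEED).
 */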
639 static void tg3_phy_set_wirespeed(struct tg3 *tp)
640 {
641         u32 val;
642
643         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
644                 return;
645
646         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
647             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
648                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
649                              (val | (1 << 15) | (1 << 4)));
650 }
651
652 static int tg3_bmcr_reset(struct tg3 *tp)
653 {
654         u32 phy_control;
655         int limit, err;
656
657         /* OK, reset it, and poll the BMCR_RESET bit until it
658          * clears or we time out.
659          */
660         phy_control = BMCR_RESET;
661         err = tg3_writephy(tp, MII_BMCR, phy_control);
662         if (err != 0)
663                 return -EBUSY;
664
665         limit = 5000;
666         while (limit--) {
667                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
668                 if (err != 0)
669                         return -EBUSY;
670
671                 if ((phy_control & BMCR_RESET) == 0) {
672                         udelay(40);
673                         break;
674                 }
675                 udelay(10);
676         }
677         if (limit <= 0)
678                 return -EBUSY;
679
680         return 0;
681 }
682
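/* Poll PHY register 0x16 until the DSP "macro" operation started by the
 * caller has finished, or give up after a fixed number of reads.
 */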
683 static int tg3_wait_macro_done(struct tg3 *tp)
684 {
685         int limit = 100;
686
687         while (limit--) {
688                 u32 tmp32;
689
690                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
691                         if ((tmp32 & 0x1000) == 0)
692                                 break;
693                 }
694         }
695         if (limit <= 0)
696                 return -EBUSY;
697
698         return 0;
699 }
700
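/* Write a known test pattern into each of the four DSP channels and read
 * it back.  A macro timeout sets *resetp so the caller resets the PHY
 * before retrying; a pattern mismatch simply fails this attempt.
 */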
701 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
702 {
703         static const u32 test_pat[4][6] = {
704         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
705         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
706         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
707         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
708         };
709         int chan;
710
711         for (chan = 0; chan < 4; chan++) {
712                 int i;
713
714                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
715                              (chan * 0x2000) | 0x0200);
716                 tg3_writephy(tp, 0x16, 0x0002);
717
718                 for (i = 0; i < 6; i++)
719                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
720                                      test_pat[chan][i]);
721
722                 tg3_writephy(tp, 0x16, 0x0202);
723                 if (tg3_wait_macro_done(tp)) {
724                         *resetp = 1;
725                         return -EBUSY;
726                 }
727
728                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
729                              (chan * 0x2000) | 0x0200);
730                 tg3_writephy(tp, 0x16, 0x0082);
731                 if (tg3_wait_macro_done(tp)) {
732                         *resetp = 1;
733                         return -EBUSY;
734                 }
735
736                 tg3_writephy(tp, 0x16, 0x0802);
737                 if (tg3_wait_macro_done(tp)) {
738                         *resetp = 1;
739                         return -EBUSY;
740                 }
741
742                 for (i = 0; i < 6; i += 2) {
743                         u32 low, high;
744
745                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
746                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
747                             tg3_wait_macro_done(tp)) {
748                                 *resetp = 1;
749                                 return -EBUSY;
750                         }
751                         low &= 0x7fff;
752                         high &= 0x000f;
753                         if (low != test_pat[chan][i] ||
754                             high != test_pat[chan][i+1]) {
755                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
756                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
757                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
758
759                                 return -EBUSY;
760                         }
761                 }
762         }
763
764         return 0;
765 }
766
767 static int tg3_phy_reset_chanpat(struct tg3 *tp)
768 {
769         int chan;
770
771         for (chan = 0; chan < 4; chan++) {
772                 int i;
773
774                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
775                              (chan * 0x2000) | 0x0200);
776                 tg3_writephy(tp, 0x16, 0x0002);
777                 for (i = 0; i < 6; i++)
778                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
779                 tg3_writephy(tp, 0x16, 0x0202);
780                 if (tg3_wait_macro_done(tp))
781                         return -EBUSY;
782         }
783
784         return 0;
785 }
786
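/* PHY reset workaround for 5703/5704/5705 class chips: force the link
 * into 1000Mb/s full-duplex master mode, exercise the DSP with test
 * patterns until they read back correctly, then restore the original
 * PHY settings.
 */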
787 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
788 {
789         u32 reg32, phy9_orig;
790         int retries, do_phy_reset, err;
791
792         retries = 10;
793         do_phy_reset = 1;
794         do {
795                 if (do_phy_reset) {
796                         err = tg3_bmcr_reset(tp);
797                         if (err)
798                                 return err;
799                         do_phy_reset = 0;
800                 }
801
802                 /* Disable transmitter and interrupt.  */
803                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
804                         continue;
805
806                 reg32 |= 0x3000;
807                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
808
809                 /* Set full-duplex, 1000 mbps.  */
810                 tg3_writephy(tp, MII_BMCR,
811                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
812
813                 /* Set to master mode.  */
814                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
815                         continue;
816
817                 tg3_writephy(tp, MII_TG3_CTRL,
818                              (MII_TG3_CTRL_AS_MASTER |
819                               MII_TG3_CTRL_ENABLE_AS_MASTER));
820
821                 /* Enable SM_DSP_CLOCK and 6dB.  */
822                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
823
824                 /* Block the PHY control access.  */
825                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
826                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
827
828                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
829                 if (!err)
830                         break;
831         } while (--retries);
832
833         err = tg3_phy_reset_chanpat(tp);
834         if (err)
835                 return err;
836
837         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
838         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
839
840         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
841         tg3_writephy(tp, 0x16, 0x0000);
842
843         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
844             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
845                 /* Set Extended packet length bit for jumbo frames */
846                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
847         }
848         else {
849                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
850         }
851
852         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
853
854         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
855                 reg32 &= ~0x3000;
856                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
857         } else if (!err)
858                 err = -EBUSY;
859
860         return err;
861 }
862
863 /* This will reset the tigon3 PHY and apply the chip-specific
864  * workarounds required after a PHY reset.
865  */
866 static int tg3_phy_reset(struct tg3 *tp)
867 {
868         u32 phy_status;
869         int err;
870
871         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
872         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
873         if (err != 0)
874                 return -EBUSY;
875
876         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
877             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
878             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
879                 err = tg3_phy_reset_5703_4_5(tp);
880                 if (err)
881                         return err;
882                 goto out;
883         }
884
885         err = tg3_bmcr_reset(tp);
886         if (err)
887                 return err;
888
889 out:
890         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
891                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
892                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
893                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
894                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
895                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
896                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
897         }
898         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
899                 tg3_writephy(tp, 0x1c, 0x8d68);
900                 tg3_writephy(tp, 0x1c, 0x8d68);
901         }
902         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
903                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
904                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
905                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
906                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
907                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
908                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
909                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
910                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
911         }
912         /* Set Extended packet length bit (bit 14) on all chips that
913          * support jumbo frames. */
914         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
915                 /* Cannot do read-modify-write on 5401 */
916                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
917         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
918                 u32 phy_reg;
919
920                 /* Set bit 14 with read-modify-write to preserve other bits */
921                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
922                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
923                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
924         }
925
926         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
927          * jumbo frames transmission.
928          */
929         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
930                 u32 phy_reg;
931
932                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
933                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
934                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
935         }
936
937         tg3_phy_set_wirespeed(tp);
938         return 0;
939 }
940
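/* Configure the GPIO pins that gate the auxiliary (Vaux) power source.
 * On the dual-port 5704 these pins are shared, so the peer device's WOL
 * and init state is consulted before switching anything.
 */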
941 static void tg3_frob_aux_power(struct tg3 *tp)
942 {
943         struct tg3 *tp_peer = tp;
944
945         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
946                 return;
947
948         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
949                 tp_peer = pci_get_drvdata(tp->pdev_peer);
950                 if (!tp_peer)
951                         BUG();
952         }
953
955         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
956             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
957                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
958                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
959                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
960                              (GRC_LCLCTRL_GPIO_OE0 |
961                               GRC_LCLCTRL_GPIO_OE1 |
962                               GRC_LCLCTRL_GPIO_OE2 |
963                               GRC_LCLCTRL_GPIO_OUTPUT0 |
964                               GRC_LCLCTRL_GPIO_OUTPUT1));
965                         udelay(100);
966                 } else {
967                         u32 no_gpio2;
968                         u32 grc_local_ctrl;
969
970                         if (tp_peer != tp &&
971                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
972                                 return;
973
974                         /* On 5753 and variants, GPIO2 cannot be used. */
975                         no_gpio2 = tp->nic_sram_data_cfg &
976                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
977
978                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
979                                          GRC_LCLCTRL_GPIO_OE1 |
980                                          GRC_LCLCTRL_GPIO_OE2 |
981                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
982                                          GRC_LCLCTRL_GPIO_OUTPUT2;
983                         if (no_gpio2) {
984                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
985                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
986                         }
987                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
988                                                 grc_local_ctrl);
989                         udelay(100);
990
991                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
992
993                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
994                                                 grc_local_ctrl);
995                         udelay(100);
996
997                         if (!no_gpio2) {
998                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
999                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1000                                        grc_local_ctrl);
1001                                 udelay(100);
1002                         }
1003                 }
1004         } else {
1005                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1006                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1007                         if (tp_peer != tp &&
1008                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1009                                 return;
1010
1011                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1012                              (GRC_LCLCTRL_GPIO_OE1 |
1013                               GRC_LCLCTRL_GPIO_OUTPUT1));
1014                         udelay(100);
1015
1016                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1017                              (GRC_LCLCTRL_GPIO_OE1));
1018                         udelay(100);
1019
1020                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1021                              (GRC_LCLCTRL_GPIO_OE1 |
1022                               GRC_LCLCTRL_GPIO_OUTPUT1));
1023                         udelay(100);
1024                 }
1025         }
1026 }
1027
1028 static int tg3_setup_phy(struct tg3 *, int);
1029
1030 #define RESET_KIND_SHUTDOWN     0
1031 #define RESET_KIND_INIT         1
1032 #define RESET_KIND_SUSPEND      2
1033
1034 static void tg3_write_sig_post_reset(struct tg3 *, int);
1035 static int tg3_halt_cpu(struct tg3 *, u32);
1036
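/* Transition the device to the requested PCI power state (0..3, i.e.
 * D0..D3hot).  State 0 simply restores main power; for the sleep states
 * the copper link is renegotiated down to 10Mb/half, wake-on-LAN magic
 * packet mode is armed if enabled, the clocks are gated down, and the
 * power management control register is finally written.
 */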
1037 static int tg3_set_power_state(struct tg3 *tp, int state)
1038 {
1039         u32 misc_host_ctrl;
1040         u16 power_control, power_caps;
1041         int pm = tp->pm_cap;
1042
1043         /* Make sure register accesses (indirect or otherwise)
1044          * will function correctly.
1045          */
1046         pci_write_config_dword(tp->pdev,
1047                                TG3PCI_MISC_HOST_CTRL,
1048                                tp->misc_host_ctrl);
1049
1050         pci_read_config_word(tp->pdev,
1051                              pm + PCI_PM_CTRL,
1052                              &power_control);
1053         power_control |= PCI_PM_CTRL_PME_STATUS;
1054         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1055         switch (state) {
1056         case 0:
1057                 power_control |= 0;
1058                 pci_write_config_word(tp->pdev,
1059                                       pm + PCI_PM_CTRL,
1060                                       power_control);
1061                 udelay(100);    /* Delay after power state change */
1062
1063                 /* Switch out of Vaux if it is not a LOM */
1064                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
1065                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1066                         udelay(100);
1067                 }
1068
1069                 return 0;
1070
1071         case 1:
1072                 power_control |= 1;
1073                 break;
1074
1075         case 2:
1076                 power_control |= 2;
1077                 break;
1078
1079         case 3:
1080                 power_control |= 3;
1081                 break;
1082
1083         default:
1084                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1085                        "requested.\n",
1086                        tp->dev->name, state);
1087                 return -EINVAL;
1088         }
1089
1090         power_control |= PCI_PM_CTRL_PME_ENABLE;
1091
1092         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1093         tw32(TG3PCI_MISC_HOST_CTRL,
1094              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1095
1096         if (tp->link_config.phy_is_low_power == 0) {
1097                 tp->link_config.phy_is_low_power = 1;
1098                 tp->link_config.orig_speed = tp->link_config.speed;
1099                 tp->link_config.orig_duplex = tp->link_config.duplex;
1100                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1101         }
1102
1103         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1104                 tp->link_config.speed = SPEED_10;
1105                 tp->link_config.duplex = DUPLEX_HALF;
1106                 tp->link_config.autoneg = AUTONEG_ENABLE;
1107                 tg3_setup_phy(tp, 0);
1108         }
1109
1110         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1111
1112         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1113                 u32 mac_mode;
1114
1115                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1116                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1117                         udelay(40);
1118
1119                         mac_mode = MAC_MODE_PORT_MODE_MII;
1120
1121                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1122                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1123                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1124                 } else {
1125                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1126                 }
1127
1128                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1129                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1130
1131                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1132                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1133                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1134
1135                 tw32_f(MAC_MODE, mac_mode);
1136                 udelay(100);
1137
1138                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1139                 udelay(10);
1140         }
1141
1142         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1143             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1144              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1145                 u32 base_val;
1146
1147                 base_val = tp->pci_clock_ctrl;
1148                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1149                              CLOCK_CTRL_TXCLK_DISABLE);
1150
1151                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1152                      CLOCK_CTRL_ALTCLK |
1153                      CLOCK_CTRL_PWRDOWN_PLL133);
1154                 udelay(40);
1155         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
1156                 /* do nothing */
1157         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1158                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1159                 u32 newbits1, newbits2;
1160
1161                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1162                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1163                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1164                                     CLOCK_CTRL_TXCLK_DISABLE |
1165                                     CLOCK_CTRL_ALTCLK);
1166                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1167                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1168                         newbits1 = CLOCK_CTRL_625_CORE;
1169                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1170                 } else {
1171                         newbits1 = CLOCK_CTRL_ALTCLK;
1172                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1173                 }
1174
1175                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1176                 udelay(40);
1177
1178                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1179                 udelay(40);
1180
1181                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1182                         u32 newbits3;
1183
1184                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1185                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1186                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1187                                             CLOCK_CTRL_TXCLK_DISABLE |
1188                                             CLOCK_CTRL_44MHZ_CORE);
1189                         } else {
1190                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1191                         }
1192
1193                         tw32_f(TG3PCI_CLOCK_CTRL,
1194                                          tp->pci_clock_ctrl | newbits3);
1195                         udelay(40);
1196                 }
1197         }
1198
1199         tg3_frob_aux_power(tp);
1200
1201         /* Workaround for unstable PLL clock */
1202         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1203             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1204                 u32 val = tr32(0x7d00);
1205
1206                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1207                 tw32(0x7d00, val);
1208                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1209                         tg3_halt_cpu(tp, RX_CPU_BASE);
1210         }
1211
1212         /* Finally, set the new power state. */
1213         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1214         udelay(100);    /* Delay after power state change */
1215
1216         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1217
1218         return 0;
1219 }
1220
1221 static void tg3_link_report(struct tg3 *tp)
1222 {
1223         if (!netif_carrier_ok(tp->dev)) {
1224                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1225         } else {
1226                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1227                        tp->dev->name,
1228                        (tp->link_config.active_speed == SPEED_1000 ?
1229                         1000 :
1230                         (tp->link_config.active_speed == SPEED_100 ?
1231                          100 : 10)),
1232                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1233                         "full" : "half"));
1234
1235                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1236                        "%s for RX.\n",
1237                        tp->dev->name,
1238                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1239                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1240         }
1241 }
1242
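/* Resolve TX/RX flow control from the local and remote pause
 * advertisements (translating 1000BASE-X pause bits into 1000BASE-T
 * form for SERDES), but only when pause autonegotiation is enabled;
 * then program MAC_RX_MODE/MAC_TX_MODE to match.
 */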
1243 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1244 {
1245         u32 new_tg3_flags = 0;
1246         u32 old_rx_mode = tp->rx_mode;
1247         u32 old_tx_mode = tp->tx_mode;
1248
1249         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1250
1251                 /* Convert 1000BaseX flow control bits to 1000BaseT
1252                  * bits before resolving flow control.
1253                  */
1254                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1255                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1256                                        ADVERTISE_PAUSE_ASYM);
1257                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1258
1259                         if (local_adv & ADVERTISE_1000XPAUSE)
1260                                 local_adv |= ADVERTISE_PAUSE_CAP;
1261                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1262                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1263                         if (remote_adv & LPA_1000XPAUSE)
1264                                 remote_adv |= LPA_PAUSE_CAP;
1265                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1266                                 remote_adv |= LPA_PAUSE_ASYM;
1267                 }
1268
1269                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1270                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1271                                 if (remote_adv & LPA_PAUSE_CAP)
1272                                         new_tg3_flags |=
1273                                                 (TG3_FLAG_RX_PAUSE |
1274                                                 TG3_FLAG_TX_PAUSE);
1275                                 else if (remote_adv & LPA_PAUSE_ASYM)
1276                                         new_tg3_flags |=
1277                                                 (TG3_FLAG_RX_PAUSE);
1278                         } else {
1279                                 if (remote_adv & LPA_PAUSE_CAP)
1280                                         new_tg3_flags |=
1281                                                 (TG3_FLAG_RX_PAUSE |
1282                                                 TG3_FLAG_TX_PAUSE);
1283                         }
1284                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1285                         if ((remote_adv & LPA_PAUSE_CAP) &&
1286                         (remote_adv & LPA_PAUSE_ASYM))
1287                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1288                 }
1289
1290                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1291                 tp->tg3_flags |= new_tg3_flags;
1292         } else {
1293                 new_tg3_flags = tp->tg3_flags;
1294         }
1295
1296         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1297                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1298         else
1299                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1300
1301         if (old_rx_mode != tp->rx_mode) {
1302                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1303         }
1304
1305         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1306                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1307         else
1308                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1309
1310         if (old_tx_mode != tp->tx_mode) {
1311                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1312         }
1313 }
1314
1315 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1316 {
1317         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1318         case MII_TG3_AUX_STAT_10HALF:
1319                 *speed = SPEED_10;
1320                 *duplex = DUPLEX_HALF;
1321                 break;
1322
1323         case MII_TG3_AUX_STAT_10FULL:
1324                 *speed = SPEED_10;
1325                 *duplex = DUPLEX_FULL;
1326                 break;
1327
1328         case MII_TG3_AUX_STAT_100HALF:
1329                 *speed = SPEED_100;
1330                 *duplex = DUPLEX_HALF;
1331                 break;
1332
1333         case MII_TG3_AUX_STAT_100FULL:
1334                 *speed = SPEED_100;
1335                 *duplex = DUPLEX_FULL;
1336                 break;
1337
1338         case MII_TG3_AUX_STAT_1000HALF:
1339                 *speed = SPEED_1000;
1340                 *duplex = DUPLEX_HALF;
1341                 break;
1342
1343         case MII_TG3_AUX_STAT_1000FULL:
1344                 *speed = SPEED_1000;
1345                 *duplex = DUPLEX_FULL;
1346                 break;
1347
1348         default:
1349                 *speed = SPEED_INVALID;
1350                 *duplex = DUPLEX_INVALID;
1351                 break;
1352         }
1353 }
1354
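/* Program the copper PHY advertisement registers from link_config:
 * either the low-power subset, the full autoneg advertisement, or a
 * single forced speed/duplex setting.
 */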
1355 static void tg3_phy_copper_begin(struct tg3 *tp)
1356 {
1357         u32 new_adv;
1358         int i;
1359
1360         if (tp->link_config.phy_is_low_power) {
1361                 /* Entering low power mode.  Disable gigabit and
1362                  * 100baseT advertisements.
1363                  */
1364                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1365
1366                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1367                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1368                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1369                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1370
1371                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1372         } else if (tp->link_config.speed == SPEED_INVALID) {
1373                 tp->link_config.advertising =
1374                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1375                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1376                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1377                          ADVERTISED_Autoneg | ADVERTISED_MII);
1378
1379                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1380                         tp->link_config.advertising &=
1381                                 ~(ADVERTISED_1000baseT_Half |
1382                                   ADVERTISED_1000baseT_Full);
1383
1384                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1385                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1386                         new_adv |= ADVERTISE_10HALF;
1387                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1388                         new_adv |= ADVERTISE_10FULL;
1389                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1390                         new_adv |= ADVERTISE_100HALF;
1391                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1392                         new_adv |= ADVERTISE_100FULL;
1393                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1394
1395                 if (tp->link_config.advertising &
1396                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1397                         new_adv = 0;
1398                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1399                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1400                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1401                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1402                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1403                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1404                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1405                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1406                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1407                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1408                 } else {
1409                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1410                 }
1411         } else {
1412                 /* Asking for a specific link mode. */
1413                 if (tp->link_config.speed == SPEED_1000) {
1414                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1415                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1416
1417                         if (tp->link_config.duplex == DUPLEX_FULL)
1418                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1419                         else
1420                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1421                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1422                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1423                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1424                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1425                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1426                 } else {
1427                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1428
1429                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1430                         if (tp->link_config.speed == SPEED_100) {
1431                                 if (tp->link_config.duplex == DUPLEX_FULL)
1432                                         new_adv |= ADVERTISE_100FULL;
1433                                 else
1434                                         new_adv |= ADVERTISE_100HALF;
1435                         } else {
1436                                 if (tp->link_config.duplex == DUPLEX_FULL)
1437                                         new_adv |= ADVERTISE_10FULL;
1438                                 else
1439                                         new_adv |= ADVERTISE_10HALF;
1440                         }
1441                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1442                 }
1443         }
1444
1445         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1446             tp->link_config.speed != SPEED_INVALID) {
1447                 u32 bmcr, orig_bmcr;
1448
1449                 tp->link_config.active_speed = tp->link_config.speed;
1450                 tp->link_config.active_duplex = tp->link_config.duplex;
1451
1452                 bmcr = 0;
1453                 switch (tp->link_config.speed) {
1454                 default:
1455                 case SPEED_10:
1456                         break;
1457
1458                 case SPEED_100:
1459                         bmcr |= BMCR_SPEED100;
1460                         break;
1461
1462                 case SPEED_1000:
1463                         bmcr |= TG3_BMCR_SPEED1000;
1464                         break;
1465                 }
1466
1467                 if (tp->link_config.duplex == DUPLEX_FULL)
1468                         bmcr |= BMCR_FULLDPLX;
1469
1470                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1471                     (bmcr != orig_bmcr)) {
1472                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1473                         for (i = 0; i < 1500; i++) {
1474                                 u32 tmp;
1475
1476                                 udelay(10);
1477                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1478                                     tg3_readphy(tp, MII_BMSR, &tmp))
1479                                         continue;
1480                                 if (!(tmp & BMSR_LSTATUS)) {
1481                                         udelay(40);
1482                                         break;
1483                                 }
1484                         }
1485                         tg3_writephy(tp, MII_BMCR, bmcr);
1486                         udelay(40);
1487                 }
1488         } else {
1489                 tg3_writephy(tp, MII_BMCR,
1490                              BMCR_ANENABLE | BMCR_ANRESTART);
1491         }
1492 }
1493
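/* BCM5401 DSP workaround sequence: turn off tap power management, set
 * the extended packet length bit and program several DSP registers
 * through the address/read-write port pair.
 */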
1494 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1495 {
1496         int err;
1497
1498         /* Turn off tap power management. */
1499         /* Set Extended packet length bit */
1500         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1501
1502         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1503         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1504
1505         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1506         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1507
1508         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1509         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1510
1511         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1512         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1513
1514         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1515         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1516
1517         udelay(40);
1518
1519         return err;
1520 }
1521
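/* Return 1 if the PHY currently advertises every 10/100 mode and,
 * unless the chip is 10/100-only, both 1000baseT modes as well;
 * return 0 otherwise or on a register read failure.
 */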
1522 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1523 {
1524         u32 adv_reg, all_mask;
1525
1526         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1527                 return 0;
1528
1529         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1530                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1531         if ((adv_reg & all_mask) != all_mask)
1532                 return 0;
1533         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1534                 u32 tg3_ctrl;
1535
1536                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1537                         return 0;
1538
1539                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1540                             MII_TG3_CTRL_ADV_1000_FULL);
1541                 if ((tg3_ctrl & all_mask) != all_mask)
1542                         return 0;
1543         }
1544         return 1;
1545 }
1546
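/* Bring up the link on a copper PHY: reset the PHY when needed, apply
 * chip specific workarounds, poll BMSR/AUX_STAT for the negotiated
 * speed and duplex, set up flow control and program MAC_MODE to match
 * the resulting link state.
 */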
1547 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1548 {
1549         int current_link_up;
1550         u32 bmsr, dummy;
1551         u16 current_speed;
1552         u8 current_duplex;
1553         int i, err;
1554
1555         tw32(MAC_EVENT, 0);
1556
1557         tw32_f(MAC_STATUS,
1558              (MAC_STATUS_SYNC_CHANGED |
1559               MAC_STATUS_CFG_CHANGED |
1560               MAC_STATUS_MI_COMPLETION |
1561               MAC_STATUS_LNKSTATE_CHANGED));
1562         udelay(40);
1563
1564         tp->mi_mode = MAC_MI_MODE_BASE;
1565         tw32_f(MAC_MI_MODE, tp->mi_mode);
1566         udelay(80);
1567
1568         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1569
1570         /* Some third-party PHYs need to be reset on link going
1571          * down.
1572          */
1573         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1574              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1575              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1576             netif_carrier_ok(tp->dev)) {
1577                 tg3_readphy(tp, MII_BMSR, &bmsr);
1578                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1579                     !(bmsr & BMSR_LSTATUS))
1580                         force_reset = 1;
1581         }
1582         if (force_reset)
1583                 tg3_phy_reset(tp);
1584
1585         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1586                 tg3_readphy(tp, MII_BMSR, &bmsr);
1587                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1588                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1589                         bmsr = 0;
1590
1591                 if (!(bmsr & BMSR_LSTATUS)) {
1592                         err = tg3_init_5401phy_dsp(tp);
1593                         if (err)
1594                                 return err;
1595
1596                         tg3_readphy(tp, MII_BMSR, &bmsr);
1597                         for (i = 0; i < 1000; i++) {
1598                                 udelay(10);
1599                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1600                                     (bmsr & BMSR_LSTATUS)) {
1601                                         udelay(40);
1602                                         break;
1603                                 }
1604                         }
1605
1606                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1607                             !(bmsr & BMSR_LSTATUS) &&
1608                             tp->link_config.active_speed == SPEED_1000) {
1609                                 err = tg3_phy_reset(tp);
1610                                 if (!err)
1611                                         err = tg3_init_5401phy_dsp(tp);
1612                                 if (err)
1613                                         return err;
1614                         }
1615                 }
1616         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1617                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1618                 /* 5701 {A0,B0} CRC bug workaround */
1619                 tg3_writephy(tp, 0x15, 0x0a75);
1620                 tg3_writephy(tp, 0x1c, 0x8c68);
1621                 tg3_writephy(tp, 0x1c, 0x8d68);
1622                 tg3_writephy(tp, 0x1c, 0x8c68);
1623         }
1624
1625         /* Clear pending interrupts... */
1626         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1627         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1628
1629         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1630                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1631         else
1632                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1633
1634         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1635             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1636                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1637                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1638                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1639                 else
1640                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1641         }
1642
1643         current_link_up = 0;
1644         current_speed = SPEED_INVALID;
1645         current_duplex = DUPLEX_INVALID;
1646
1647         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1648                 u32 val;
1649
1650                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1651                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1652                 if (!(val & (1 << 10))) {
1653                         val |= (1 << 10);
1654                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1655                         goto relink;
1656                 }
1657         }
1658
1659         bmsr = 0;
1660         for (i = 0; i < 100; i++) {
1661                 tg3_readphy(tp, MII_BMSR, &bmsr);
1662                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1663                     (bmsr & BMSR_LSTATUS))
1664                         break;
1665                 udelay(40);
1666         }
1667
1668         if (bmsr & BMSR_LSTATUS) {
1669                 u32 aux_stat, bmcr;
1670
1671                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1672                 for (i = 0; i < 2000; i++) {
1673                         udelay(10);
1674                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1675                             aux_stat)
1676                                 break;
1677                 }
1678
1679                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1680                                              &current_speed,
1681                                              &current_duplex);
1682
1683                 bmcr = 0;
1684                 for (i = 0; i < 200; i++) {
1685                         tg3_readphy(tp, MII_BMCR, &bmcr);
1686                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1687                                 continue;
1688                         if (bmcr && bmcr != 0x7fff)
1689                                 break;
1690                         udelay(10);
1691                 }
1692
1693                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1694                         if (bmcr & BMCR_ANENABLE) {
1695                                 current_link_up = 1;
1696
1697                                 /* Force autoneg restart if we are exiting
1698                                  * low power mode.
1699                                  */
1700                                 if (!tg3_copper_is_advertising_all(tp))
1701                                         current_link_up = 0;
1702                         } else {
1703                                 current_link_up = 0;
1704                         }
1705                 } else {
1706                         if (!(bmcr & BMCR_ANENABLE) &&
1707                             tp->link_config.speed == current_speed &&
1708                             tp->link_config.duplex == current_duplex) {
1709                                 current_link_up = 1;
1710                         } else {
1711                                 current_link_up = 0;
1712                         }
1713                 }
1714
1715                 tp->link_config.active_speed = current_speed;
1716                 tp->link_config.active_duplex = current_duplex;
1717         }
1718
1719         if (current_link_up == 1 &&
1720             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1721             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1722                 u32 local_adv, remote_adv;
1723
1724                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1725                         local_adv = 0;
1726                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1727
1728                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1729                         remote_adv = 0;
1730
1731                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1732
1733                 /* If we are not advertising full pause capability,
1734                  * something is wrong.  Bring the link down and reconfigure.
1735                  */
1736                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1737                         current_link_up = 0;
1738                 } else {
1739                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1740                 }
1741         }
1742 relink:
1743         if (current_link_up == 0) {
1744                 u32 tmp;
1745
1746                 tg3_phy_copper_begin(tp);
1747
1748                 tg3_readphy(tp, MII_BMSR, &tmp);
1749                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1750                     (tmp & BMSR_LSTATUS))
1751                         current_link_up = 1;
1752         }
1753
1754         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1755         if (current_link_up == 1) {
1756                 if (tp->link_config.active_speed == SPEED_100 ||
1757                     tp->link_config.active_speed == SPEED_10)
1758                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1759                 else
1760                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1761         } else
1762                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1763
1764         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1765         if (tp->link_config.active_duplex == DUPLEX_HALF)
1766                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1767
1768         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1769         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1770                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1771                     (current_link_up == 1 &&
1772                      tp->link_config.active_speed == SPEED_10))
1773                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1774         } else {
1775                 if (current_link_up == 1)
1776                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1777         }
1778
1779         /* ??? Without this setting Netgear GA302T PHY does not
1780          * ??? send/receive packets...
1781          */
1782         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1783             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1784                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1785                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1786                 udelay(80);
1787         }
1788
1789         tw32_f(MAC_MODE, tp->mac_mode);
1790         udelay(40);
1791
1792         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1793                 /* Polled via timer. */
1794                 tw32_f(MAC_EVENT, 0);
1795         } else {
1796                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1797         }
1798         udelay(40);
1799
1800         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1801             current_link_up == 1 &&
1802             tp->link_config.active_speed == SPEED_1000 &&
1803             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1804              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1805                 udelay(120);
1806                 tw32_f(MAC_STATUS,
1807                      (MAC_STATUS_SYNC_CHANGED |
1808                       MAC_STATUS_CFG_CHANGED));
1809                 udelay(40);
1810                 tg3_write_mem(tp,
1811                               NIC_SRAM_FIRMWARE_MBOX,
1812                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1813         }
1814
1815         if (current_link_up != netif_carrier_ok(tp->dev)) {
1816                 if (current_link_up)
1817                         netif_carrier_on(tp->dev);
1818                 else
1819                         netif_carrier_off(tp->dev);
1820                 tg3_link_report(tp);
1821         }
1822
1823         return 0;
1824 }
1825
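/* Software state for the fiber autonegotiation state machine below. */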
1826 struct tg3_fiber_aneginfo {
1827         int state;
1828 #define ANEG_STATE_UNKNOWN              0
1829 #define ANEG_STATE_AN_ENABLE            1
1830 #define ANEG_STATE_RESTART_INIT         2
1831 #define ANEG_STATE_RESTART              3
1832 #define ANEG_STATE_DISABLE_LINK_OK      4
1833 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1834 #define ANEG_STATE_ABILITY_DETECT       6
1835 #define ANEG_STATE_ACK_DETECT_INIT      7
1836 #define ANEG_STATE_ACK_DETECT           8
1837 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1838 #define ANEG_STATE_COMPLETE_ACK         10
1839 #define ANEG_STATE_IDLE_DETECT_INIT     11
1840 #define ANEG_STATE_IDLE_DETECT          12
1841 #define ANEG_STATE_LINK_OK              13
1842 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1843 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1844
1845         u32 flags;
1846 #define MR_AN_ENABLE            0x00000001
1847 #define MR_RESTART_AN           0x00000002
1848 #define MR_AN_COMPLETE          0x00000004
1849 #define MR_PAGE_RX              0x00000008
1850 #define MR_NP_LOADED            0x00000010
1851 #define MR_TOGGLE_TX            0x00000020
1852 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1853 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1854 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1855 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1856 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1857 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1858 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1859 #define MR_TOGGLE_RX            0x00002000
1860 #define MR_NP_RX                0x00004000
1861
1862 #define MR_LINK_OK              0x80000000
1863
1864         unsigned long link_time, cur_time;
1865
1866         u32 ability_match_cfg;
1867         int ability_match_count;
1868
1869         char ability_match, idle_match, ack_match;
1870
1871         u32 txconfig, rxconfig;
1872 #define ANEG_CFG_NP             0x00000080
1873 #define ANEG_CFG_ACK            0x00000040
1874 #define ANEG_CFG_RF2            0x00000020
1875 #define ANEG_CFG_RF1            0x00000010
1876 #define ANEG_CFG_PS2            0x00000001
1877 #define ANEG_CFG_PS1            0x00008000
1878 #define ANEG_CFG_HD             0x00004000
1879 #define ANEG_CFG_FD             0x00002000
1880 #define ANEG_CFG_INVAL          0x00001f06
1881
1882 };
1883 #define ANEG_OK         0
1884 #define ANEG_DONE       1
1885 #define ANEG_TIMER_ENAB 2
1886 #define ANEG_FAILED     -1
1887
1888 #define ANEG_STATE_SETTLE_TIME  10000
1889
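/* Run one step of the software 1000BASE-X autonegotiation state
 * machine used on fiber links: sample the received config word,
 * advance ap->state and return ANEG_OK, ANEG_DONE, ANEG_TIMER_ENAB or
 * ANEG_FAILED.
 */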
1890 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1891                                    struct tg3_fiber_aneginfo *ap)
1892 {
1893         unsigned long delta;
1894         u32 rx_cfg_reg;
1895         int ret;
1896
1897         if (ap->state == ANEG_STATE_UNKNOWN) {
1898                 ap->rxconfig = 0;
1899                 ap->link_time = 0;
1900                 ap->cur_time = 0;
1901                 ap->ability_match_cfg = 0;
1902                 ap->ability_match_count = 0;
1903                 ap->ability_match = 0;
1904                 ap->idle_match = 0;
1905                 ap->ack_match = 0;
1906         }
1907         ap->cur_time++;
1908
1909         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1910                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1911
1912                 if (rx_cfg_reg != ap->ability_match_cfg) {
1913                         ap->ability_match_cfg = rx_cfg_reg;
1914                         ap->ability_match = 0;
1915                         ap->ability_match_count = 0;
1916                 } else {
1917                         if (++ap->ability_match_count > 1) {
1918                                 ap->ability_match = 1;
1919                                 ap->ability_match_cfg = rx_cfg_reg;
1920                         }
1921                 }
1922                 if (rx_cfg_reg & ANEG_CFG_ACK)
1923                         ap->ack_match = 1;
1924                 else
1925                         ap->ack_match = 0;
1926
1927                 ap->idle_match = 0;
1928         } else {
1929                 ap->idle_match = 1;
1930                 ap->ability_match_cfg = 0;
1931                 ap->ability_match_count = 0;
1932                 ap->ability_match = 0;
1933                 ap->ack_match = 0;
1934
1935                 rx_cfg_reg = 0;
1936         }
1937
1938         ap->rxconfig = rx_cfg_reg;
1939         ret = ANEG_OK;
1940
1941         switch (ap->state) {
1942         case ANEG_STATE_UNKNOWN:
1943                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1944                         ap->state = ANEG_STATE_AN_ENABLE;
1945
1946                 /* fallthru */
1947         case ANEG_STATE_AN_ENABLE:
1948                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1949                 if (ap->flags & MR_AN_ENABLE) {
1950                         ap->link_time = 0;
1951                         ap->cur_time = 0;
1952                         ap->ability_match_cfg = 0;
1953                         ap->ability_match_count = 0;
1954                         ap->ability_match = 0;
1955                         ap->idle_match = 0;
1956                         ap->ack_match = 0;
1957
1958                         ap->state = ANEG_STATE_RESTART_INIT;
1959                 } else {
1960                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1961                 }
1962                 break;
1963
1964         case ANEG_STATE_RESTART_INIT:
1965                 ap->link_time = ap->cur_time;
1966                 ap->flags &= ~(MR_NP_LOADED);
1967                 ap->txconfig = 0;
1968                 tw32(MAC_TX_AUTO_NEG, 0);
1969                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1970                 tw32_f(MAC_MODE, tp->mac_mode);
1971                 udelay(40);
1972
1973                 ret = ANEG_TIMER_ENAB;
1974                 ap->state = ANEG_STATE_RESTART;
1975
1976                 /* fallthru */
1977         case ANEG_STATE_RESTART:
1978                 delta = ap->cur_time - ap->link_time;
1979                 if (delta > ANEG_STATE_SETTLE_TIME) {
1980                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1981                 } else {
1982                         ret = ANEG_TIMER_ENAB;
1983                 }
1984                 break;
1985
1986         case ANEG_STATE_DISABLE_LINK_OK:
1987                 ret = ANEG_DONE;
1988                 break;
1989
1990         case ANEG_STATE_ABILITY_DETECT_INIT:
1991                 ap->flags &= ~(MR_TOGGLE_TX);
1992                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1993                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1994                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1995                 tw32_f(MAC_MODE, tp->mac_mode);
1996                 udelay(40);
1997
1998                 ap->state = ANEG_STATE_ABILITY_DETECT;
1999                 break;
2000
2001         case ANEG_STATE_ABILITY_DETECT:
2002                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2003                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2004                 }
2005                 break;
2006
2007         case ANEG_STATE_ACK_DETECT_INIT:
2008                 ap->txconfig |= ANEG_CFG_ACK;
2009                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2010                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2011                 tw32_f(MAC_MODE, tp->mac_mode);
2012                 udelay(40);
2013
2014                 ap->state = ANEG_STATE_ACK_DETECT;
2015
2016                 /* fallthru */
2017         case ANEG_STATE_ACK_DETECT:
2018                 if (ap->ack_match != 0) {
2019                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2020                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2021                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2022                         } else {
2023                                 ap->state = ANEG_STATE_AN_ENABLE;
2024                         }
2025                 } else if (ap->ability_match != 0 &&
2026                            ap->rxconfig == 0) {
2027                         ap->state = ANEG_STATE_AN_ENABLE;
2028                 }
2029                 break;
2030
2031         case ANEG_STATE_COMPLETE_ACK_INIT:
2032                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2033                         ret = ANEG_FAILED;
2034                         break;
2035                 }
2036                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2037                                MR_LP_ADV_HALF_DUPLEX |
2038                                MR_LP_ADV_SYM_PAUSE |
2039                                MR_LP_ADV_ASYM_PAUSE |
2040                                MR_LP_ADV_REMOTE_FAULT1 |
2041                                MR_LP_ADV_REMOTE_FAULT2 |
2042                                MR_LP_ADV_NEXT_PAGE |
2043                                MR_TOGGLE_RX |
2044                                MR_NP_RX);
2045                 if (ap->rxconfig & ANEG_CFG_FD)
2046                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2047                 if (ap->rxconfig & ANEG_CFG_HD)
2048                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2049                 if (ap->rxconfig & ANEG_CFG_PS1)
2050                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2051                 if (ap->rxconfig & ANEG_CFG_PS2)
2052                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2053                 if (ap->rxconfig & ANEG_CFG_RF1)
2054                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2055                 if (ap->rxconfig & ANEG_CFG_RF2)
2056                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2057                 if (ap->rxconfig & ANEG_CFG_NP)
2058                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2059
2060                 ap->link_time = ap->cur_time;
2061
2062                 ap->flags ^= (MR_TOGGLE_TX);
2063                 if (ap->rxconfig & 0x0008)
2064                         ap->flags |= MR_TOGGLE_RX;
2065                 if (ap->rxconfig & ANEG_CFG_NP)
2066                         ap->flags |= MR_NP_RX;
2067                 ap->flags |= MR_PAGE_RX;
2068
2069                 ap->state = ANEG_STATE_COMPLETE_ACK;
2070                 ret = ANEG_TIMER_ENAB;
2071                 break;
2072
2073         case ANEG_STATE_COMPLETE_ACK:
2074                 if (ap->ability_match != 0 &&
2075                     ap->rxconfig == 0) {
2076                         ap->state = ANEG_STATE_AN_ENABLE;
2077                         break;
2078                 }
2079                 delta = ap->cur_time - ap->link_time;
2080                 if (delta > ANEG_STATE_SETTLE_TIME) {
2081                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2082                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2083                         } else {
2084                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2085                                     !(ap->flags & MR_NP_RX)) {
2086                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2087                                 } else {
2088                                         ret = ANEG_FAILED;
2089                                 }
2090                         }
2091                 }
2092                 break;
2093
2094         case ANEG_STATE_IDLE_DETECT_INIT:
2095                 ap->link_time = ap->cur_time;
2096                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2097                 tw32_f(MAC_MODE, tp->mac_mode);
2098                 udelay(40);
2099
2100                 ap->state = ANEG_STATE_IDLE_DETECT;
2101                 ret = ANEG_TIMER_ENAB;
2102                 break;
2103
2104         case ANEG_STATE_IDLE_DETECT:
2105                 if (ap->ability_match != 0 &&
2106                     ap->rxconfig == 0) {
2107                         ap->state = ANEG_STATE_AN_ENABLE;
2108                         break;
2109                 }
2110                 delta = ap->cur_time - ap->link_time;
2111                 if (delta > ANEG_STATE_SETTLE_TIME) {
2112                         /* XXX another gem from the Broadcom driver :( */
2113                         ap->state = ANEG_STATE_LINK_OK;
2114                 }
2115                 break;
2116
2117         case ANEG_STATE_LINK_OK:
2118                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2119                 ret = ANEG_DONE;
2120                 break;
2121
2122         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2123                 /* ??? unimplemented */
2124                 break;
2125
2126         case ANEG_STATE_NEXT_PAGE_WAIT:
2127                 /* ??? unimplemented */
2128                 break;
2129
2130         default:
2131                 ret = ANEG_FAILED;
2132                 break;
2133         }
2134
2135         return ret;
2136 }
2137
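/* Drive the autoneg state machine to completion (or timeout),
 * reporting the link partner ability flags through *flags and
 * returning 1 when autonegotiation finished successfully.
 */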
2138 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2139 {
2140         int res = 0;
2141         struct tg3_fiber_aneginfo aninfo;
2142         int status = ANEG_FAILED;
2143         unsigned int tick;
2144         u32 tmp;
2145
2146         tw32_f(MAC_TX_AUTO_NEG, 0);
2147
2148         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2149         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2150         udelay(40);
2151
2152         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2153         udelay(40);
2154
2155         memset(&aninfo, 0, sizeof(aninfo));
2156         aninfo.flags |= MR_AN_ENABLE;
2157         aninfo.state = ANEG_STATE_UNKNOWN;
2158         aninfo.cur_time = 0;
2159         tick = 0;
2160         while (++tick < 195000) {
2161                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2162                 if (status == ANEG_DONE || status == ANEG_FAILED)
2163                         break;
2164
2165                 udelay(1);
2166         }
2167
2168         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2169         tw32_f(MAC_MODE, tp->mac_mode);
2170         udelay(40);
2171
2172         *flags = aninfo.flags;
2173
2174         if (status == ANEG_DONE &&
2175             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2176                              MR_LP_ADV_FULL_DUPLEX)))
2177                 res = 1;
2178
2179         return res;
2180 }
2181
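/* Hardware init sequence for the BCM8002 SerDes PHY: set the PLL lock
 * range, issue a software reset, select config mode, enable
 * auto-lock/comdet and pulse POR.
 */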
2182 static void tg3_init_bcm8002(struct tg3 *tp)
2183 {
2184         u32 mac_status = tr32(MAC_STATUS);
2185         int i;
2186
2187         /* Reset when initializing for the first time or when we have a link. */
2188         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2189             !(mac_status & MAC_STATUS_PCS_SYNCED))
2190                 return;
2191
2192         /* Set PLL lock range. */
2193         tg3_writephy(tp, 0x16, 0x8007);
2194
2195         /* SW reset */
2196         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2197
2198         /* Wait for reset to complete. */
2199         /* XXX schedule_timeout() ... */
2200         for (i = 0; i < 500; i++)
2201                 udelay(10);
2202
2203         /* Config mode; select PMA/Ch 1 regs. */
2204         tg3_writephy(tp, 0x10, 0x8411);
2205
2206         /* Enable auto-lock and comdet, select txclk for tx. */
2207         tg3_writephy(tp, 0x11, 0x0a10);
2208
2209         tg3_writephy(tp, 0x18, 0x00a0);
2210         tg3_writephy(tp, 0x16, 0x41ff);
2211
2212         /* Assert and deassert POR. */
2213         tg3_writephy(tp, 0x13, 0x0400);
2214         udelay(40);
2215         tg3_writephy(tp, 0x13, 0x0000);
2216
2217         tg3_writephy(tp, 0x11, 0x0a50);
2218         udelay(40);
2219         tg3_writephy(tp, 0x11, 0x0a10);
2220
2221         /* Wait for signal to stabilize */
2222         /* XXX schedule_timeout() ... */
2223         for (i = 0; i < 15000; i++)
2224                 udelay(10);
2225
2226         /* Deselect the channel register so we can read the PHYID
2227          * later.
2228          */
2229         tg3_writephy(tp, 0x10, 0x8011);
2230 }
2231
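/* Fiber link setup using the SG_DIG hardware autoneg block.  Returns
 * 1 if the link came up, 0 otherwise.
 */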
2232 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2233 {
2234         u32 sg_dig_ctrl, sg_dig_status;
2235         u32 serdes_cfg, expected_sg_dig_ctrl;
2236         int workaround, port_a;
2237         int current_link_up;
2238
2239         serdes_cfg = 0;
2240         expected_sg_dig_ctrl = 0;
2241         workaround = 0;
2242         port_a = 1;
2243         current_link_up = 0;
2244
2245         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2246             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2247                 workaround = 1;
2248                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2249                         port_a = 0;
2250
2251                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2252                 /* preserve bits 20-23 for voltage regulator */
2253                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2254         }
2255
2256         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2257
2258         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2259                 if (sg_dig_ctrl & (1 << 31)) {
2260                         if (workaround) {
2261                                 u32 val = serdes_cfg;
2262
2263                                 if (port_a)
2264                                         val |= 0xc010000;
2265                                 else
2266                                         val |= 0x4010000;
2267                                 tw32_f(MAC_SERDES_CFG, val);
2268                         }
2269                         tw32_f(SG_DIG_CTRL, 0x01388400);
2270                 }
2271                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2272                         tg3_setup_flow_control(tp, 0, 0);
2273                         current_link_up = 1;
2274                 }
2275                 goto out;
2276         }
2277
2278         /* Want auto-negotiation.  */
2279         expected_sg_dig_ctrl = 0x81388400;
2280
2281         /* Pause capability */
2282         expected_sg_dig_ctrl |= (1 << 11);
2283
2284         /* Asymmetric pause */
2285         expected_sg_dig_ctrl |= (1 << 12);
2286
2287         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2288                 if (workaround)
2289                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2290                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2291                 udelay(5);
2292                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2293
2294                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2295         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2296                                  MAC_STATUS_SIGNAL_DET)) {
2297                 int i;
2298
2299                 /* Give time to negotiate (~200ms) */
2300                 for (i = 0; i < 40000; i++) {
2301                         sg_dig_status = tr32(SG_DIG_STATUS);
2302                         if (sg_dig_status & (0x3))
2303                                 break;
2304                         udelay(5);
2305                 }
2306                 mac_status = tr32(MAC_STATUS);
2307
2308                 if ((sg_dig_status & (1 << 1)) &&
2309                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2310                         u32 local_adv, remote_adv;
2311
2312                         local_adv = ADVERTISE_PAUSE_CAP;
2313                         remote_adv = 0;
2314                         if (sg_dig_status & (1 << 19))
2315                                 remote_adv |= LPA_PAUSE_CAP;
2316                         if (sg_dig_status & (1 << 20))
2317                                 remote_adv |= LPA_PAUSE_ASYM;
2318
2319                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2320                         current_link_up = 1;
2321                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2322                 } else if (!(sg_dig_status & (1 << 1))) {
2323                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2324                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2325                         else {
2326                                 if (workaround) {
2327                                         u32 val = serdes_cfg;
2328
2329                                         if (port_a)
2330                                                 val |= 0xc010000;
2331                                         else
2332                                                 val |= 0x4010000;
2333
2334                                         tw32_f(MAC_SERDES_CFG, val);
2335                                 }
2336
2337                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2338                                 udelay(40);
2339
2340                                 /* Link parallel detection - the link is up
2341                                  * only if we have PCS_SYNC and are not
2342                                  * receiving config code words.  */
2343                                 mac_status = tr32(MAC_STATUS);
2344                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2345                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2346                                         tg3_setup_flow_control(tp, 0, 0);
2347                                         current_link_up = 1;
2348                                 }
2349                         }
2350                 }
2351         }
2352
2353 out:
2354         return current_link_up;
2355 }
2356
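/* Fiber link setup using the software autoneg state machine, or a
 * forced 1000-full configuration when autoneg is disabled.  Returns
 * 1 if the link came up, 0 otherwise.
 */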
2357 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2358 {
2359         int current_link_up = 0;
2360
2361         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2362                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2363                 goto out;
2364         }
2365
2366         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2367                 u32 flags;
2368                 int i;
2369
2370                 if (fiber_autoneg(tp, &flags)) {
2371                         u32 local_adv, remote_adv;
2372
2373                         local_adv = ADVERTISE_PAUSE_CAP;
2374                         remote_adv = 0;
2375                         if (flags & MR_LP_ADV_SYM_PAUSE)
2376                                 remote_adv |= LPA_PAUSE_CAP;
2377                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2378                                 remote_adv |= LPA_PAUSE_ASYM;
2379
2380                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2381
2382                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2383                         current_link_up = 1;
2384                 }
2385                 for (i = 0; i < 30; i++) {
2386                         udelay(20);
2387                         tw32_f(MAC_STATUS,
2388                                (MAC_STATUS_SYNC_CHANGED |
2389                                 MAC_STATUS_CFG_CHANGED));
2390                         udelay(40);
2391                         if ((tr32(MAC_STATUS) &
2392                              (MAC_STATUS_SYNC_CHANGED |
2393                               MAC_STATUS_CFG_CHANGED)) == 0)
2394                                 break;
2395                 }
2396
2397                 mac_status = tr32(MAC_STATUS);
2398                 if (current_link_up == 0 &&
2399                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2400                     !(mac_status & MAC_STATUS_RCVD_CFG))
2401                         current_link_up = 1;
2402         } else {
2403                 /* Forcing 1000FD link up. */
2404                 current_link_up = 1;
2405                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2406
2407                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2408                 udelay(40);
2409         }
2410
2411 out:
2412         return current_link_up;
2413 }
2414
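/* Top-level link setup for TBI/SerDes (fiber) ports: run hardware or
 * software autoneg, update MAC_MODE and the link LEDs, and report
 * carrier changes.
 */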
2415 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2416 {
2417         u32 orig_pause_cfg;
2418         u16 orig_active_speed;
2419         u8 orig_active_duplex;
2420         u32 mac_status;
2421         int current_link_up;
2422         int i;
2423
2424         orig_pause_cfg =
2425                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2426                                   TG3_FLAG_TX_PAUSE));
2427         orig_active_speed = tp->link_config.active_speed;
2428         orig_active_duplex = tp->link_config.active_duplex;
2429
2430         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2431             netif_carrier_ok(tp->dev) &&
2432             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2433                 mac_status = tr32(MAC_STATUS);
2434                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2435                                MAC_STATUS_SIGNAL_DET |
2436                                MAC_STATUS_CFG_CHANGED |
2437                                MAC_STATUS_RCVD_CFG);
2438                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2439                                    MAC_STATUS_SIGNAL_DET)) {
2440                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2441                                             MAC_STATUS_CFG_CHANGED));
2442                         return 0;
2443                 }
2444         }
2445
2446         tw32_f(MAC_TX_AUTO_NEG, 0);
2447
2448         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2449         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2450         tw32_f(MAC_MODE, tp->mac_mode);
2451         udelay(40);
2452
2453         if (tp->phy_id == PHY_ID_BCM8002)
2454                 tg3_init_bcm8002(tp);
2455
2456         /* Enable link change event even when serdes polling.  */
2457         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2458         udelay(40);
2459
2460         current_link_up = 0;
2461         mac_status = tr32(MAC_STATUS);
2462
2463         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2464                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2465         else
2466                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2467
2468         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2469         tw32_f(MAC_MODE, tp->mac_mode);
2470         udelay(40);
2471
2472         tp->hw_status->status =
2473                 (SD_STATUS_UPDATED |
2474                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2475
2476         for (i = 0; i < 100; i++) {
2477                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2478                                     MAC_STATUS_CFG_CHANGED));
2479                 udelay(5);
2480                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2481                                          MAC_STATUS_CFG_CHANGED)) == 0)
2482                         break;
2483         }
2484
2485         mac_status = tr32(MAC_STATUS);
2486         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2487                 current_link_up = 0;
2488                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2489                         tw32_f(MAC_MODE, (tp->mac_mode |
2490                                           MAC_MODE_SEND_CONFIGS));
2491                         udelay(1);
2492                         tw32_f(MAC_MODE, tp->mac_mode);
2493                 }
2494         }
2495
2496         if (current_link_up == 1) {
2497                 tp->link_config.active_speed = SPEED_1000;
2498                 tp->link_config.active_duplex = DUPLEX_FULL;
2499                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2500                                     LED_CTRL_LNKLED_OVERRIDE |
2501                                     LED_CTRL_1000MBPS_ON));
2502         } else {
2503                 tp->link_config.active_speed = SPEED_INVALID;
2504                 tp->link_config.active_duplex = DUPLEX_INVALID;
2505                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2506                                     LED_CTRL_LNKLED_OVERRIDE |
2507                                     LED_CTRL_TRAFFIC_OVERRIDE));
2508         }
2509
2510         if (current_link_up != netif_carrier_ok(tp->dev)) {
2511                 if (current_link_up)
2512                         netif_carrier_on(tp->dev);
2513                 else
2514                         netif_carrier_off(tp->dev);
2515                 tg3_link_report(tp);
2516         } else {
2517                 u32 now_pause_cfg =
2518                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2519                                          TG3_FLAG_TX_PAUSE);
2520                 if (orig_pause_cfg != now_pause_cfg ||
2521                     orig_active_speed != tp->link_config.active_speed ||
2522                     orig_active_duplex != tp->link_config.active_duplex)
2523                         tg3_link_report(tp);
2524         }
2525
2526         return 0;
2527 }
2528
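/* Link setup for fiber ports attached through an MII-addressable
 * SerDes PHY, using the 1000BASE-X advertisement bits in the MII
 * registers.
 */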
2529 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2530 {
2531         int current_link_up, err = 0;
2532         u32 bmsr, bmcr;
2533         u16 current_speed;
2534         u8 current_duplex;
2535
2536         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2537         tw32_f(MAC_MODE, tp->mac_mode);
2538         udelay(40);
2539
2540         tw32(MAC_EVENT, 0);
2541
2542         tw32_f(MAC_STATUS,
2543              (MAC_STATUS_SYNC_CHANGED |
2544               MAC_STATUS_CFG_CHANGED |
2545               MAC_STATUS_MI_COMPLETION |
2546               MAC_STATUS_LNKSTATE_CHANGED));
2547         udelay(40);
2548
2549         if (force_reset)
2550                 tg3_phy_reset(tp);
2551
2552         current_link_up = 0;
2553         current_speed = SPEED_INVALID;
2554         current_duplex = DUPLEX_INVALID;
2555
2556         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2557         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2558
2559         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2560
2561         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2562             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2563                 /* do nothing, just check for link up at the end */
2564         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2565                 u32 adv, new_adv;
2566
2567                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2568                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2569                                   ADVERTISE_1000XPAUSE |
2570                                   ADVERTISE_1000XPSE_ASYM |
2571                                   ADVERTISE_SLCT);
2572
2573                 /* Always advertise symmetric PAUSE just like copper */
2574                 new_adv |= ADVERTISE_1000XPAUSE;
2575
2576                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2577                         new_adv |= ADVERTISE_1000XHALF;
2578                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2579                         new_adv |= ADVERTISE_1000XFULL;
2580
2581                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2582                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2583                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2584                         tg3_writephy(tp, MII_BMCR, bmcr);
2585
2586                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2587                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2588                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2589
2590                         return err;
2591                 }
2592         } else {
2593                 u32 new_bmcr;
2594
2595                 bmcr &= ~BMCR_SPEED1000;
2596                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2597
2598                 if (tp->link_config.duplex == DUPLEX_FULL)
2599                         new_bmcr |= BMCR_FULLDPLX;
2600
2601                 if (new_bmcr != bmcr) {
2602                         /* BMCR_SPEED1000 is a reserved bit that needs
2603                          * to be set on write.
2604                          */
2605                         new_bmcr |= BMCR_SPEED1000;
2606
2607                         /* Force a linkdown */
2608                         if (netif_carrier_ok(tp->dev)) {
2609                                 u32 adv;
2610
2611                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2612                                 adv &= ~(ADVERTISE_1000XFULL |
2613                                          ADVERTISE_1000XHALF |
2614                                          ADVERTISE_SLCT);
2615                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2616                                 tg3_writephy(tp, MII_BMCR, bmcr |
2617                                                            BMCR_ANRESTART |
2618                                                            BMCR_ANENABLE);
2619                                 udelay(10);
2620                                 netif_carrier_off(tp->dev);
2621                         }
2622                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2623                         bmcr = new_bmcr;
2624                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2625                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2626                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2627                 }
2628         }
2629
2630         if (bmsr & BMSR_LSTATUS) {
2631                 current_speed = SPEED_1000;
2632                 current_link_up = 1;
2633                 if (bmcr & BMCR_FULLDPLX)
2634                         current_duplex = DUPLEX_FULL;
2635                 else
2636                         current_duplex = DUPLEX_HALF;
2637
2638                 if (bmcr & BMCR_ANENABLE) {
2639                         u32 local_adv, remote_adv, common;
2640
2641                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2642                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2643                         common = local_adv & remote_adv;
2644                         if (common & (ADVERTISE_1000XHALF |
2645                                       ADVERTISE_1000XFULL)) {
2646                                 if (common & ADVERTISE_1000XFULL)
2647                                         current_duplex = DUPLEX_FULL;
2648                                 else
2649                                         current_duplex = DUPLEX_HALF;
2650
2651                                 tg3_setup_flow_control(tp, local_adv,
2652                                                        remote_adv);
2653                         }
2654                         else
2655                                 current_link_up = 0;
2656                 }
2657         }
2658
2659         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2660         if (tp->link_config.active_duplex == DUPLEX_HALF)
2661                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2662
2663         tw32_f(MAC_MODE, tp->mac_mode);
2664         udelay(40);
2665
2666         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2667
2668         tp->link_config.active_speed = current_speed;
2669         tp->link_config.active_duplex = current_duplex;
2670
2671         if (current_link_up != netif_carrier_ok(tp->dev)) {
2672                 if (current_link_up)
2673                         netif_carrier_on(tp->dev);
2674                 else {
2675                         netif_carrier_off(tp->dev);
2676                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2677                 }
2678                 tg3_link_report(tp);
2679         }
2680         return err;
2681 }
2682
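/* Handle 1000BASE-X parallel detection: force the link up when we see
 * signal detect without config code words, and re-enable autoneg once
 * config code words are received again.
 */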
2683 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2684 {
2685         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2686                 /* Give autoneg time to complete. */
2687                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2688                 return;
2689         }
2690         if (!netif_carrier_ok(tp->dev) &&
2691             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2692                 u32 bmcr;
2693
2694                 tg3_readphy(tp, MII_BMCR, &bmcr);
2695                 if (bmcr & BMCR_ANENABLE) {
2696                         u32 phy1, phy2;
2697
2698                         /* Select shadow register 0x1f */
2699                         tg3_writephy(tp, 0x1c, 0x7c00);
2700                         tg3_readphy(tp, 0x1c, &phy1);
2701
2702                         /* Select expansion interrupt status register */
2703                         tg3_writephy(tp, 0x17, 0x0f01);
2704                         tg3_readphy(tp, 0x15, &phy2);
2705                         tg3_readphy(tp, 0x15, &phy2);
2706
2707                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2708                                 /* We have signal detect and are not receiving
2709                                  * config code words, so the link is up by
2710                                  * parallel detection.
2711                                  */
2712
2713                                 bmcr &= ~BMCR_ANENABLE;
2714                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2715                                 tg3_writephy(tp, MII_BMCR, bmcr);
2716                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2717                         }
2718                 }
2719         }
2720         else if (netif_carrier_ok(tp->dev) &&
2721                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2722                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2723                 u32 phy2;
2724
2725                 /* Select expansion interrupt status register */
2726                 tg3_writephy(tp, 0x17, 0x0f01);
2727                 tg3_readphy(tp, 0x15, &phy2);
2728                 if (phy2 & 0x20) {
2729                         u32 bmcr;
2730
2731                         /* Config code words received, turn on autoneg. */
2732                         tg3_readphy(tp, MII_BMCR, &bmcr);
2733                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2734
2735                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2736
2737                 }
2738         }
2739 }
2740
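/* Dispatch link setup to the copper, fiber or MII-SerDes handler,
 * then update the transmit slot time and statistics coalescing to
 * match the new link state.
 */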
2741 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2742 {
2743         int err;
2744
2745         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2746                 err = tg3_setup_fiber_phy(tp, force_reset);
2747         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2748                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2749         } else {
2750                 err = tg3_setup_copper_phy(tp, force_reset);
2751         }
2752
2753         if (tp->link_config.active_speed == SPEED_1000 &&
2754             tp->link_config.active_duplex == DUPLEX_HALF)
2755                 tw32(MAC_TX_LENGTHS,
2756                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2757                       (6 << TX_LENGTHS_IPG_SHIFT) |
2758                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2759         else
2760                 tw32(MAC_TX_LENGTHS,
2761                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2762                       (6 << TX_LENGTHS_IPG_SHIFT) |
2763                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2764
2765         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2766                 if (netif_carrier_ok(tp->dev)) {
2767                         tw32(HOSTCC_STAT_COAL_TICKS,
2768                              tp->coal.stats_block_coalesce_usecs);
2769                 } else {
2770                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2771                 }
2772         }
2773
2774         return err;
2775 }
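/* The two MAC_TX_LENGTHS values programmed above differ only in the
 * slot-time field: 0xff for 1000Mb/s half duplex versus 32 otherwise,
 * presumably to provide the much larger carrier-extended slot time
 * that half-duplex gigabit requires; the IPG fields are the same in
 * both cases.
 */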
2776
2777 /* Tigon3 never reports partial packet sends.  So we do not
2778  * need special logic to handle SKBs that have not had all
2779  * of their frags sent yet, like SunGEM does.
2780  */
2781 static void tg3_tx(struct tg3 *tp)
2782 {
2783         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2784         u32 sw_idx = tp->tx_cons;
2785
2786         while (sw_idx != hw_idx) {
2787                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2788                 struct sk_buff *skb = ri->skb;
2789                 int i;
2790
2791                 if (unlikely(skb == NULL))
2792                         BUG();
2793
2794                 pci_unmap_single(tp->pdev,
2795                                  pci_unmap_addr(ri, mapping),
2796                                  skb_headlen(skb),
2797                                  PCI_DMA_TODEVICE);
2798
2799                 ri->skb = NULL;
2800
2801                 sw_idx = NEXT_TX(sw_idx);
2802
2803                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2804                         if (unlikely(sw_idx == hw_idx))
2805                                 BUG();
2806
2807                         ri = &tp->tx_buffers[sw_idx];
2808                         if (unlikely(ri->skb != NULL))
2809                                 BUG();
2810
2811                         pci_unmap_page(tp->pdev,
2812                                        pci_unmap_addr(ri, mapping),
2813                                        skb_shinfo(skb)->frags[i].size,
2814                                        PCI_DMA_TODEVICE);
2815
2816                         sw_idx = NEXT_TX(sw_idx);
2817                 }
2818
2819                 dev_kfree_skb(skb);
2820         }
2821
2822         tp->tx_cons = sw_idx;
2823
2824         if (netif_queue_stopped(tp->dev) &&
2825             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2826                 netif_wake_queue(tp->dev);
2827 }
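/* Illustrative note on the index arithmetic above: NEXT_TX() just
 * advances an index modulo the TX ring size, i.e. roughly
 *
 *      sw_idx = (sw_idx + 1) & (TG3_TX_RING_SIZE - 1);
 *
 * so the loop walks one descriptor per completed fragment, and the
 * queue is only woken once TX_BUFFS_AVAIL() clears the wakeup
 * threshold.
 */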
2828
2829 /* Returns size of skb allocated or < 0 on error.
2830  *
2831  * We only need to fill in the address because the other members
2832  * of the RX descriptor are invariant, see tg3_init_rings.
2833  *
2834  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2835  * posting buffers we only dirty the first cache line of the RX
2836  * descriptor (containing the address).  Whereas for the RX status
2837  * buffers the cpu only reads the last cacheline of the RX descriptor
2838  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2839  */
2840 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2841                             int src_idx, u32 dest_idx_unmasked)
2842 {
2843         struct tg3_rx_buffer_desc *desc;
2844         struct ring_info *map, *src_map;
2845         struct sk_buff *skb;
2846         dma_addr_t mapping;
2847         int skb_size, dest_idx;
2848
2849         src_map = NULL;
2850         switch (opaque_key) {
2851         case RXD_OPAQUE_RING_STD:
2852                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2853                 desc = &tp->rx_std[dest_idx];
2854                 map = &tp->rx_std_buffers[dest_idx];
2855                 if (src_idx >= 0)
2856                         src_map = &tp->rx_std_buffers[src_idx];
2857                 skb_size = tp->rx_pkt_buf_sz;
2858                 break;
2859
2860         case RXD_OPAQUE_RING_JUMBO:
2861                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2862                 desc = &tp->rx_jumbo[dest_idx];
2863                 map = &tp->rx_jumbo_buffers[dest_idx];
2864                 if (src_idx >= 0)
2865                         src_map = &tp->rx_jumbo_buffers[src_idx];
2866                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2867                 break;
2868
2869         default:
2870                 return -EINVAL;
2871         }
2872
2873         /* Do not overwrite any of the map or rp information
2874          * until we are sure we can commit to a new buffer.
2875          *
2876          * Callers depend upon this behavior and assume that
2877          * we leave everything unchanged if we fail.
2878          */
2879         skb = dev_alloc_skb(skb_size);
2880         if (skb == NULL)
2881                 return -ENOMEM;
2882
2883         skb->dev = tp->dev;
2884         skb_reserve(skb, tp->rx_offset);
2885
2886         mapping = pci_map_single(tp->pdev, skb->data,
2887                                  skb_size - tp->rx_offset,
2888                                  PCI_DMA_FROMDEVICE);
2889
2890         map->skb = skb;
2891         pci_unmap_addr_set(map, mapping, mapping);
2892
2893         if (src_map != NULL)
2894                 src_map->skb = NULL;
2895
2896         desc->addr_hi = ((u64)mapping >> 32);
2897         desc->addr_lo = ((u64)mapping & 0xffffffff);
2898
2899         return skb_size;
2900 }
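/* Worked example for the descriptor address split above (values are
 * arbitrary): a 64-bit DMA mapping of 0x0000000123456000 is stored as
 *
 *      desc->addr_hi = 0x00000001;
 *      desc->addr_lo = 0x23456000;
 *
 * since the buffer descriptors carry the address as two 32-bit words.
 */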
2901
2902 /* We only need to copy over the address because the other
2903  * members of the RX descriptor are invariant.  See notes above
2904  * tg3_alloc_rx_skb for full details.
2905  */
2906 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2907                            int src_idx, u32 dest_idx_unmasked)
2908 {
2909         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2910         struct ring_info *src_map, *dest_map;
2911         int dest_idx;
2912
2913         switch (opaque_key) {
2914         case RXD_OPAQUE_RING_STD:
2915                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2916                 dest_desc = &tp->rx_std[dest_idx];
2917                 dest_map = &tp->rx_std_buffers[dest_idx];
2918                 src_desc = &tp->rx_std[src_idx];
2919                 src_map = &tp->rx_std_buffers[src_idx];
2920                 break;
2921
2922         case RXD_OPAQUE_RING_JUMBO:
2923                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2924                 dest_desc = &tp->rx_jumbo[dest_idx];
2925                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2926                 src_desc = &tp->rx_jumbo[src_idx];
2927                 src_map = &tp->rx_jumbo_buffers[src_idx];
2928                 break;
2929
2930         default:
2931                 return;
2932         }
2933
2934         dest_map->skb = src_map->skb;
2935         pci_unmap_addr_set(dest_map, mapping,
2936                            pci_unmap_addr(src_map, mapping));
2937         dest_desc->addr_hi = src_desc->addr_hi;
2938         dest_desc->addr_lo = src_desc->addr_lo;
2939
2940         src_map->skb = NULL;
2941 }
2942
2943 #if TG3_VLAN_TAG_USED
2944 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2945 {
2946         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2947 }
2948 #endif
2949
2950 /* The RX ring scheme is composed of multiple rings which post fresh
2951  * buffers to the chip, and one special ring the chip uses to report
2952  * status back to the host.
2953  *
2954  * The special ring reports the status of received packets to the
2955  * host.  The chip does not write into the original descriptor the
2956  * RX buffer was obtained from.  The chip simply takes the original
2957  * descriptor as provided by the host, updates the status and length
2958  * field, then writes this into the next status ring entry.
2959  *
2960  * Each ring the host uses to post buffers to the chip is described
2961  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
2962  * it is first placed into the on-chip ram.  When the packet's length
2963  * is known, it walks down the TG3_BDINFO entries to select the ring.
2964  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
2965  * whose MAXLEN covers the new packet's length is chosen.
2966  *
2967  * The "separate ring for rx status" scheme may sound queer, but it makes
2968  * sense from a cache coherency perspective.  If only the host writes
2969  * to the buffer post rings, and only the chip writes to the rx status
2970  * rings, then cache lines never move beyond shared-modified state.
2971  * If both the host and chip were to write into the same ring, cache line
2972  * eviction could occur since both entities want it in an exclusive state.
2973  */
2974 static int tg3_rx(struct tg3 *tp, int budget)
2975 {
2976         u32 work_mask;
2977         u32 sw_idx = tp->rx_rcb_ptr;
2978         u16 hw_idx;
2979         int received;
2980
2981         hw_idx = tp->hw_status->idx[0].rx_producer;
2982         /*
2983          * We need to order the read of hw_idx and the read of
2984          * the opaque cookie.
2985          */
2986         rmb();
2987         work_mask = 0;
2988         received = 0;
2989         while (sw_idx != hw_idx && budget > 0) {
2990                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2991                 unsigned int len;
2992                 struct sk_buff *skb;
2993                 dma_addr_t dma_addr;
2994                 u32 opaque_key, desc_idx, *post_ptr;
2995
2996                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2997                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2998                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2999                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3000                                                   mapping);
3001                         skb = tp->rx_std_buffers[desc_idx].skb;
3002                         post_ptr = &tp->rx_std_ptr;
3003                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3004                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3005                                                   mapping);
3006                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3007                         post_ptr = &tp->rx_jumbo_ptr;
3008                 }
3009                 else {
3010                         goto next_pkt_nopost;
3011                 }
3012
3013                 work_mask |= opaque_key;
3014
3015                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3016                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3017                 drop_it:
3018                         tg3_recycle_rx(tp, opaque_key,
3019                                        desc_idx, *post_ptr);
3020                 drop_it_no_recycle:
3021                         /* Other statistics kept track of by card. */
3022                         tp->net_stats.rx_dropped++;
3023                         goto next_pkt;
3024                 }
3025
3026                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3027
3028                 if (len > RX_COPY_THRESHOLD &&
3029                     tp->rx_offset == 2) {
3030                         /* rx_offset != 2 iff this is a 5701 card running
3031                          * in PCI-X mode [see tg3_get_invariants()]
3032                          */
3033                         int skb_size;
3034
3035                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3036                                                     desc_idx, *post_ptr);
3037                         if (skb_size < 0)
3038                                 goto drop_it;
3039
3040                         pci_unmap_single(tp->pdev, dma_addr,
3041                                          skb_size - tp->rx_offset,
3042                                          PCI_DMA_FROMDEVICE);
3043
3044                         skb_put(skb, len);
3045                 } else {
3046                         struct sk_buff *copy_skb;
3047
3048                         tg3_recycle_rx(tp, opaque_key,
3049                                        desc_idx, *post_ptr);
3050
3051                         copy_skb = dev_alloc_skb(len + 2);
3052                         if (copy_skb == NULL)
3053                                 goto drop_it_no_recycle;
3054
3055                         copy_skb->dev = tp->dev;
3056                         skb_reserve(copy_skb, 2);
3057                         skb_put(copy_skb, len);
3058                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3059                         memcpy(copy_skb->data, skb->data, len);
3060                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3061
3062                         /* We'll reuse the original ring buffer. */
3063                         skb = copy_skb;
3064                 }
3065
3066                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3067                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3068                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3069                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3070                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3071                 else
3072                         skb->ip_summed = CHECKSUM_NONE;
3073
3074                 skb->protocol = eth_type_trans(skb, tp->dev);
3075 #if TG3_VLAN_TAG_USED
3076                 if (tp->vlgrp != NULL &&
3077                     desc->type_flags & RXD_FLAG_VLAN) {
3078                         tg3_vlan_rx(tp, skb,
3079                                     desc->err_vlan & RXD_VLAN_MASK);
3080                 } else
3081 #endif
3082                         netif_receive_skb(skb);
3083
3084                 tp->dev->last_rx = jiffies;
3085                 received++;
3086                 budget--;
3087
3088 next_pkt:
3089                 (*post_ptr)++;
3090 next_pkt_nopost:
3091                 sw_idx++;
3092                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3093
3094                 /* Refresh hw_idx to see if there is new work */
3095                 if (sw_idx == hw_idx) {
3096                         hw_idx = tp->hw_status->idx[0].rx_producer;
3097                         rmb();
3098                 }
3099         }
3100
3101         /* ACK the status ring. */
3102         tp->rx_rcb_ptr = sw_idx;
3103         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3104
3105         /* Refill RX ring(s). */
3106         if (work_mask & RXD_OPAQUE_RING_STD) {
3107                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3108                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3109                              sw_idx);
3110         }
3111         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3112                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3113                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3114                              sw_idx);
3115         }
3116         mmiowb();
3117
3118         return received;
3119 }
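/* Recap of the mailbox writes at the end of tg3_rx() above: the
 * receive-return mailbox tells the chip how far the host has consumed
 * the status ring, while the std/jumbo producer mailboxes (written
 * only when work_mask shows that ring was actually used) hand the
 * recycled or freshly allocated buffers back to the chip.
 */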
3120
3121 static int tg3_poll(struct net_device *netdev, int *budget)
3122 {
3123         struct tg3 *tp = netdev_priv(netdev);
3124         struct tg3_hw_status *sblk = tp->hw_status;
3125         int done;
3126
3127         /* handle link change and other phy events */
3128         if (!(tp->tg3_flags &
3129               (TG3_FLAG_USE_LINKCHG_REG |
3130                TG3_FLAG_POLL_SERDES))) {
3131                 if (sblk->status & SD_STATUS_LINK_CHG) {
3132                         sblk->status = SD_STATUS_UPDATED |
3133                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3134                         spin_lock(&tp->lock);
3135                         tg3_setup_phy(tp, 0);
3136                         spin_unlock(&tp->lock);
3137                 }
3138         }
3139
3140         /* run TX completion thread */
3141         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3142                 spin_lock(&tp->tx_lock);
3143                 tg3_tx(tp);
3144                 spin_unlock(&tp->tx_lock);
3145         }
3146
3147         /* run RX thread, within the bounds set by NAPI.
3148          * All RX "locking" is done by ensuring outside
3149          * code synchronizes with dev->poll()
3150          */
3151         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3152                 int orig_budget = *budget;
3153                 int work_done;
3154
3155                 if (orig_budget > netdev->quota)
3156                         orig_budget = netdev->quota;
3157
3158                 work_done = tg3_rx(tp, orig_budget);
3159
3160                 *budget -= work_done;
3161                 netdev->quota -= work_done;
3162         }
3163
3164         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
3165                 tp->last_tag = sblk->status_tag;
3166         rmb();
3167         sblk->status &= ~SD_STATUS_UPDATED;
3168
3169         /* if no more work, tell net stack and NIC we're done */
3170         done = !tg3_has_work(tp);
3171         if (done) {
3172                 spin_lock(&tp->lock);
3173                 netif_rx_complete(netdev);
3174                 tg3_restart_ints(tp);
3175                 spin_unlock(&tp->lock);
3176         }
3177
3178         return (done ? 0 : 1);
3179 }
3180
3181 static void tg3_irq_quiesce(struct tg3 *tp)
3182 {
3183         BUG_ON(tp->irq_sync);
3184
3185         tp->irq_sync = 1;
3186         smp_mb();
3187
3188         synchronize_irq(tp->pdev->irq);
3189 }
3190
3191 static inline int tg3_irq_sync(struct tg3 *tp)
3192 {
3193         return tp->irq_sync;
3194 }
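/* tg3_irq_quiesce() sets irq_sync and then synchronize_irq()s, and the
 * INTx/MSI handlers bail out early once tg3_irq_sync() is true, so
 * after the quiesce returns the handlers will not schedule NAPI or
 * re-enable chip interrupts.
 */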
3195
3196 /* Fully shut down all tg3 driver activity elsewhere in the system.
3197  * If irq_sync is non-zero, then we must synchronize with the IRQ
3198  * handler as well.  Most of the time, this is not necessary except when
3199  * shutting down the device.
3200  */
3201 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3202 {
3203         if (irq_sync)
3204                 tg3_irq_quiesce(tp);
3205         spin_lock_bh(&tp->lock);
3206         spin_lock(&tp->tx_lock);
3207 }
3208
3209 static inline void tg3_full_unlock(struct tg3 *tp)
3210 {
3211         spin_unlock(&tp->tx_lock);
3212         spin_unlock_bh(&tp->lock);
3213 }
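/* Lock ordering: tp->lock is always taken before tp->tx_lock and
 * released after it.  Paths that need both locks should go through
 * tg3_full_lock()/tg3_full_unlock() (or follow the same order) to
 * avoid ABBA deadlocks.
 */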
3214
3215 /* MSI ISR - No need to check for interrupt sharing and no need to
3216  * flush status block and interrupt mailbox. PCI ordering rules
3217  * guarantee that MSI will arrive after the status block.
3218  */
3219 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3220 {
3221         struct net_device *dev = dev_id;
3222         struct tg3 *tp = netdev_priv(dev);
3223         struct tg3_hw_status *sblk = tp->hw_status;
3224
3225         /*
3226          * Writing any value to intr-mbox-0 clears PCI INTA# and
3227          * chip-internal interrupt pending events.
3228          * Writing non-zero to intr-mbox-0 additionally tells the
3229          * NIC to stop sending us irqs, engaging "in-intr-handler"
3230          * event coalescing.
3231          */
3232         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3233         tp->last_tag = sblk->status_tag;
3234         rmb();
3235         if (tg3_irq_sync(tp))
3236                 goto out;
3237         sblk->status &= ~SD_STATUS_UPDATED;
3238         if (likely(tg3_has_work(tp)))
3239                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3240         else {
3241                 /* No work, re-enable interrupts.  */
3242                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3243                              tp->last_tag << 24);
3244         }
3245 out:
3246         return IRQ_RETVAL(1);
3247 }
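/* Descriptive note on the tagged-status handshake above: writing
 * (last_tag << 24) back to the interrupt mailbox re-enables chip
 * interrupts and tells the hardware which status tag the host has
 * processed, so only status updates newer than last_tag will raise
 * another interrupt.
 */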
3248
3249 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3250 {
3251         struct net_device *dev = dev_id;
3252         struct tg3 *tp = netdev_priv(dev);
3253         struct tg3_hw_status *sblk = tp->hw_status;
3254         unsigned int handled = 1;
3255
3256         /* In INTx mode, it is possible for the interrupt to arrive at the
3257          * CPU before its status block update has reached host memory.
3258          * Reading the PCI State register will confirm whether the
3259          * interrupt is ours and will flush the status block.
3260          */
3261         if ((sblk->status & SD_STATUS_UPDATED) ||
3262             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3263                 /*
3264                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3265                  * chip-internal interrupt pending events.
3266                  * Writing non-zero to intr-mbox-0 additionally tells the
3267                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3268                  * event coalescing.
3269                  */
3270                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3271                              0x00000001);
3272                 if (tg3_irq_sync(tp))
3273                         goto out;
3274                 sblk->status &= ~SD_STATUS_UPDATED;
3275                 if (likely(tg3_has_work(tp)))
3276                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3277                 else {
3278                         /* No work, shared interrupt perhaps?  re-enable
3279                          * interrupts, and flush that PCI write
3280                          */
3281                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3282                                 0x00000000);
3283                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3284                 }
3285         } else {        /* shared interrupt */
3286                 handled = 0;
3287         }
3288 out:
3289         return IRQ_RETVAL(handled);
3290 }
3291
3292 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3293 {
3294         struct net_device *dev = dev_id;
3295         struct tg3 *tp = netdev_priv(dev);
3296         struct tg3_hw_status *sblk = tp->hw_status;
3297         unsigned int handled = 1;
3298
3299         /* In INTx mode, it is possible for the interrupt to arrive at the
3300          * CPU before its status block update has reached host memory.
3301          * Reading the PCI State register will confirm whether the
3302          * interrupt is ours and will flush the status block.
3303          */
3304         if ((sblk->status & SD_STATUS_UPDATED) ||
3305             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3306                 /*
3307                  * writing any value to intr-mbox-0 clears PCI INTA# and
3308                  * chip-internal interrupt pending events.
3309                  * writing non-zero to intr-mbox-0 additionally tells the
3310                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3311                  * event coalescing.
3312                  */
3313                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3314                              0x00000001);
3315                 tp->last_tag = sblk->status_tag;
3316                 rmb();
3317                 if (tg3_irq_sync(tp))
3318                         goto out;
3319                 sblk->status &= ~SD_STATUS_UPDATED;
3320                 if (likely(tg3_has_work(tp)))
3321                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3322                 else {
3323                         /* no work, shared interrupt perhaps?  re-enable
3324                          * interrupts, and flush that PCI write
3325                          */
3326                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3327                                      tp->last_tag << 24);
3328                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
3329                 }
3330         } else {        /* shared interrupt */
3331                 handled = 0;
3332         }
3333 out:
3334         return IRQ_RETVAL(handled);
3335 }
3336
3337 /* ISR for interrupt test */
3338 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3339                 struct pt_regs *regs)
3340 {
3341         struct net_device *dev = dev_id;
3342         struct tg3 *tp = netdev_priv(dev);
3343         struct tg3_hw_status *sblk = tp->hw_status;
3344
3345         if (sblk->status & SD_STATUS_UPDATED) {
3346                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3347                              0x00000001);
3348                 return IRQ_RETVAL(1);
3349         }
3350         return IRQ_RETVAL(0);
3351 }
3352
3353 static int tg3_init_hw(struct tg3 *);
3354 static int tg3_halt(struct tg3 *, int, int);
3355
3356 #ifdef CONFIG_NET_POLL_CONTROLLER
3357 static void tg3_poll_controller(struct net_device *dev)
3358 {
3359         struct tg3 *tp = netdev_priv(dev);
3360
3361         tg3_interrupt(tp->pdev->irq, dev, NULL);
3362 }
3363 #endif
3364
3365 static void tg3_reset_task(void *_data)
3366 {
3367         struct tg3 *tp = _data;
3368         unsigned int restart_timer;
3369
3370         tg3_netif_stop(tp);
3371
3372         tg3_full_lock(tp, 1);
3373
3374         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3375         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3376
3377         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3378         tg3_init_hw(tp);
3379
3380         tg3_netif_start(tp);
3381
3382         tg3_full_unlock(tp);
3383
3384         if (restart_timer)
3385                 mod_timer(&tp->timer, jiffies + 1);
3386 }
3387
3388 static void tg3_tx_timeout(struct net_device *dev)
3389 {
3390         struct tg3 *tp = netdev_priv(dev);
3391
3392         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3393                dev->name);
3394
3395         schedule_work(&tp->reset_task);
3396 }
3397
3398 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3399
3400 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3401                                        u32 guilty_entry, int guilty_len,
3402                                        u32 last_plus_one, u32 *start, u32 mss)
3403 {
3404         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3405         dma_addr_t new_addr;
3406         u32 entry = *start;
3407         int i;
3408
3409         if (!new_skb) {
3410                 dev_kfree_skb(skb);
3411                 return -1;
3412         }
3413
3414         /* New SKB is guaranteed to be linear. */
3415         entry = *start;
3416         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3417                                   PCI_DMA_TODEVICE);
3418         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3419                     (skb->ip_summed == CHECKSUM_HW) ?
3420                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3421         *start = NEXT_TX(entry);
3422
3423         /* Now clean up the sw ring entries. */
3424         i = 0;
3425         while (entry != last_plus_one) {
3426                 int len;
3427
3428                 if (i == 0)
3429                         len = skb_headlen(skb);
3430                 else
3431                         len = skb_shinfo(skb)->frags[i-1].size;
3432                 pci_unmap_single(tp->pdev,
3433                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3434                                  len, PCI_DMA_TODEVICE);
3435                 if (i == 0) {
3436                         tp->tx_buffers[entry].skb = new_skb;
3437                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3438                 } else {
3439                         tp->tx_buffers[entry].skb = NULL;
3440                 }
3441                 entry = NEXT_TX(entry);
3442                 i++;
3443         }
3444
3445         dev_kfree_skb(skb);
3446
3447         return 0;
3448 }
3449
3450 static void tg3_set_txd(struct tg3 *tp, int entry,
3451                         dma_addr_t mapping, int len, u32 flags,
3452                         u32 mss_and_is_end)
3453 {
3454         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3455         int is_end = (mss_and_is_end & 0x1);
3456         u32 mss = (mss_and_is_end >> 1);
3457         u32 vlan_tag = 0;
3458
3459         if (is_end)
3460                 flags |= TXD_FLAG_END;
3461         if (flags & TXD_FLAG_VLAN) {
3462                 vlan_tag = flags >> 16;
3463                 flags &= 0xffff;
3464         }
3465         vlan_tag |= (mss << TXD_MSS_SHIFT);
3466
3467         txd->addr_hi = ((u64) mapping >> 32);
3468         txd->addr_lo = ((u64) mapping & 0xffffffff);
3469         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3470         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3471 }
3472
3473 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3474 {
3475         u32 base = (u32) mapping & 0xffffffff;
3476
3477         return ((base > 0xffffdcc0) &&
3478                 (base + len + 8 < base));
3479 }
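/* Worked example for the test above (illustrative values): with
 * base = 0xffffe000 and len = 0x3000, base + len + 8 wraps to
 * 0x00001008 < base, so the buffer would cross a 4GB boundary and
 * must take the workaround path.  The base > 0xffffdcc0 pre-check is
 * just a fast path that skips the addition for buffers starting well
 * below the boundary.
 */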
3480
3481 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3482 {
3483         struct tg3 *tp = netdev_priv(dev);
3484         dma_addr_t mapping;
3485         unsigned int i;
3486         u32 len, entry, base_flags, mss;
3487         int would_hit_hwbug;
3488
3489         len = skb_headlen(skb);
3490
3491         /* No BH disabling for tx_lock here.  We are running in BH disabled
3492          * context and TX reclaim runs via tp->poll inside of a software
3493          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3494          * no IRQ context deadlocks to worry about either.  Rejoice!
3495          */
3496         if (!spin_trylock(&tp->tx_lock))
3497                 return NETDEV_TX_LOCKED; 
3498
3499         /* This is a hard error, log it. */
3500         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3501                 netif_stop_queue(dev);
3502                 spin_unlock(&tp->tx_lock);
3503                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3504                        dev->name);
3505                 return NETDEV_TX_BUSY;
3506         }
3507
3508         entry = tp->tx_prod;
3509         base_flags = 0;
3510         if (skb->ip_summed == CHECKSUM_HW)
3511                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3512 #if TG3_TSO_SUPPORT != 0
3513         mss = 0;
3514         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3515             (mss = skb_shinfo(skb)->tso_size) != 0) {
3516                 int tcp_opt_len, ip_tcp_len;
3517
3518                 if (skb_header_cloned(skb) &&
3519                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3520                         dev_kfree_skb(skb);
3521                         goto out_unlock;
3522                 }
3523
3524                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3525                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3526
3527                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3528                                TXD_FLAG_CPU_POST_DMA);
3529
3530                 skb->nh.iph->check = 0;
3531                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3532                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3533                         skb->h.th->check = 0;
3534                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3535                 }
3536                 else {
3537                         skb->h.th->check =
3538                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3539                                                    skb->nh.iph->daddr,
3540                                                    0, IPPROTO_TCP, 0);
3541                 }
3542
3543                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3544                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3545                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3546                                 int tsflags;
3547
3548                                 tsflags = ((skb->nh.iph->ihl - 5) +
3549                                            (tcp_opt_len >> 2));
3550                                 mss |= (tsflags << 11);
3551                         }
3552                 } else {
3553                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3554                                 int tsflags;
3555
3556                                 tsflags = ((skb->nh.iph->ihl - 5) +
3557                                            (tcp_opt_len >> 2));
3558                                 base_flags |= tsflags << 12;
3559                         }
3560                 }
3561         }
3562 #else
3563         mss = 0;
3564 #endif
3565 #if TG3_VLAN_TAG_USED
3566         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3567                 base_flags |= (TXD_FLAG_VLAN |
3568                                (vlan_tx_tag_get(skb) << 16));
3569 #endif
3570
3571         /* Queue skb data, a.k.a. the main skb fragment. */
3572         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3573
3574         tp->tx_buffers[entry].skb = skb;
3575         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3576
3577         would_hit_hwbug = 0;
3578
3579         if (tg3_4g_overflow_test(mapping, len))
3580                 would_hit_hwbug = entry + 1;
3581
3582         tg3_set_txd(tp, entry, mapping, len, base_flags,
3583                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3584
3585         entry = NEXT_TX(entry);
3586
3587         /* Now loop through additional data fragments, and queue them. */
3588         if (skb_shinfo(skb)->nr_frags > 0) {
3589                 unsigned int i, last;
3590
3591                 last = skb_shinfo(skb)->nr_frags - 1;
3592                 for (i = 0; i <= last; i++) {
3593                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3594
3595                         len = frag->size;
3596                         mapping = pci_map_page(tp->pdev,
3597                                                frag->page,
3598                                                frag->page_offset,
3599                                                len, PCI_DMA_TODEVICE);
3600
3601                         tp->tx_buffers[entry].skb = NULL;
3602                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3603
3604                         if (tg3_4g_overflow_test(mapping, len)) {
3605                                 /* Only one should match. */
3606                                 if (would_hit_hwbug)
3607                                         BUG();
3608                                 would_hit_hwbug = entry + 1;
3609                         }
3610
3611                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3612                                 tg3_set_txd(tp, entry, mapping, len,
3613                                             base_flags, (i == last)|(mss << 1));
3614                         else
3615                                 tg3_set_txd(tp, entry, mapping, len,
3616                                             base_flags, (i == last));
3617
3618                         entry = NEXT_TX(entry);
3619                 }
3620         }
3621
3622         if (would_hit_hwbug) {
3623                 u32 last_plus_one = entry;
3624                 u32 start;
3625                 unsigned int len = 0;
3626
3627                 would_hit_hwbug -= 1;
3628                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3629                 entry &= (TG3_TX_RING_SIZE - 1);
3630                 start = entry;
3631                 i = 0;
3632                 while (entry != last_plus_one) {
3633                         if (i == 0)
3634                                 len = skb_headlen(skb);
3635                         else
3636                                 len = skb_shinfo(skb)->frags[i-1].size;
3637
3638                         if (entry == would_hit_hwbug)
3639                                 break;
3640
3641                         i++;
3642                         entry = NEXT_TX(entry);
3643
3644                 }
3645
3646                 /* If the workaround fails due to memory/mapping
3647                  * failure, silently drop this packet.
3648                  */
3649                 if (tigon3_4gb_hwbug_workaround(tp, skb,
3650                                                 entry, len,
3651                                                 last_plus_one,
3652                                                 &start, mss))
3653                         goto out_unlock;
3654
3655                 entry = start;
3656         }
3657
3658         /* Packets are ready, update Tx producer idx local and on card. */
3659         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3660
3661         tp->tx_prod = entry;
3662         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3663                 netif_stop_queue(dev);
3664
3665 out_unlock:
3666         mmiowb();
3667         spin_unlock(&tp->tx_lock);
3668
3669         dev->trans_start = jiffies;
3670
3671         return NETDEV_TX_OK;
3672 }
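/* tg3_start_xmit() uses the trylock pattern noted in its opening
 * comment: if tx_lock is contended, returning NETDEV_TX_LOCKED hands
 * the packet back to the stack for a later retry instead of spinning
 * on the lock in the hot path.
 */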
3673
3674 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3675                                int new_mtu)
3676 {
3677         dev->mtu = new_mtu;
3678
3679         if (new_mtu > ETH_DATA_LEN) {
3680                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
3681                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3682                         ethtool_op_set_tso(dev, 0);
3683                 }
3684                 else
3685                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3686         } else {
3687                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
3688                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
3689                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
3690         }
3691 }
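/* As the code above shows, 5780 parts treat jumbo frames and TSO as
 * mutually exclusive: raising the MTU past ETH_DATA_LEN clears the
 * TSO-capable flag instead of enabling the jumbo ring, and dropping
 * it back restores TSO.
 */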
3692
3693 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3694 {
3695         struct tg3 *tp = netdev_priv(dev);
3696
3697         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3698                 return -EINVAL;
3699
3700         if (!netif_running(dev)) {
3701                 /* We'll just catch it later when the
3702                  * device is up'd.
3703                  */
3704                 tg3_set_mtu(dev, tp, new_mtu);
3705                 return 0;
3706         }
3707
3708         tg3_netif_stop(tp);
3709
3710         tg3_full_lock(tp, 1);
3711
3712         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3713
3714         tg3_set_mtu(dev, tp, new_mtu);
3715
3716         tg3_init_hw(tp);
3717
3718         tg3_netif_start(tp);
3719
3720         tg3_full_unlock(tp);
3721
3722         return 0;
3723 }
3724
3725 /* Free up pending packets in all rx/tx rings.
3726  *
3727  * The chip has been shut down and the driver detached from
3728  * the networking, so no interrupts or new tx packets will
3729  * end up in the driver.  tp->{tx,}lock is not held and we are not
3730  * in an interrupt context and thus may sleep.
3731  */
3732 static void tg3_free_rings(struct tg3 *tp)
3733 {
3734         struct ring_info *rxp;
3735         int i;
3736
3737         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3738                 rxp = &tp->rx_std_buffers[i];
3739
3740                 if (rxp->skb == NULL)
3741                         continue;
3742                 pci_unmap_single(tp->pdev,
3743                                  pci_unmap_addr(rxp, mapping),
3744                                  tp->rx_pkt_buf_sz - tp->rx_offset,
3745                                  PCI_DMA_FROMDEVICE);
3746                 dev_kfree_skb_any(rxp->skb);
3747                 rxp->skb = NULL;
3748         }
3749
3750         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3751                 rxp = &tp->rx_jumbo_buffers[i];
3752
3753                 if (rxp->skb == NULL)
3754                         continue;
3755                 pci_unmap_single(tp->pdev,
3756                                  pci_unmap_addr(rxp, mapping),
3757                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3758                                  PCI_DMA_FROMDEVICE);
3759                 dev_kfree_skb_any(rxp->skb);
3760                 rxp->skb = NULL;
3761         }
3762
3763         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3764                 struct tx_ring_info *txp;
3765                 struct sk_buff *skb;
3766                 int j;
3767
3768                 txp = &tp->tx_buffers[i];
3769                 skb = txp->skb;
3770
3771                 if (skb == NULL) {
3772                         i++;
3773                         continue;
3774                 }
3775
3776                 pci_unmap_single(tp->pdev,
3777                                  pci_unmap_addr(txp, mapping),
3778                                  skb_headlen(skb),
3779                                  PCI_DMA_TODEVICE);
3780                 txp->skb = NULL;
3781
3782                 i++;
3783
3784                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3785                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3786                         pci_unmap_page(tp->pdev,
3787                                        pci_unmap_addr(txp, mapping),
3788                                        skb_shinfo(skb)->frags[j].size,
3789                                        PCI_DMA_TODEVICE);
3790                         i++;
3791                 }
3792
3793                 dev_kfree_skb_any(skb);
3794         }
3795 }
3796
3797 /* Initialize tx/rx rings for packet processing.
3798  *
3799  * The chip has been shut down and the driver detached from
3800  * the networking, so no interrupts or new tx packets will
3801  * end up in the driver.  tp->{tx,}lock are held and thus
3802  * we may not sleep.
3803  */
3804 static void tg3_init_rings(struct tg3 *tp)
3805 {
3806         u32 i;
3807
3808         /* Free up all the SKBs. */
3809         tg3_free_rings(tp);
3810
3811         /* Zero out all descriptors. */
3812         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3813         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3814         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3815         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3816
3817         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
3818         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) &&
3819             (tp->dev->mtu > ETH_DATA_LEN))
3820                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3821
3822         /* Initialize invariants of the rings, we only set this
3823          * stuff once.  This works because the card does not
3824          * write into the rx buffer posting rings.
3825          */
3826         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3827                 struct tg3_rx_buffer_desc *rxd;
3828
3829                 rxd = &tp->rx_std[i];
3830                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
3831                         << RXD_LEN_SHIFT;
3832                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3833                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3834                                (i << RXD_OPAQUE_INDEX_SHIFT));
3835         }
3836
3837         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3838                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3839                         struct tg3_rx_buffer_desc *rxd;
3840
3841                         rxd = &tp->rx_jumbo[i];
3842                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3843                                 << RXD_LEN_SHIFT;
3844                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3845                                 RXD_FLAG_JUMBO;
3846                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3847                                (i << RXD_OPAQUE_INDEX_SHIFT));
3848                 }
3849         }
3850
3851         /* Now allocate fresh SKBs for each rx ring. */
3852         for (i = 0; i < tp->rx_pending; i++) {
3853                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3854                                      -1, i) < 0)
3855                         break;
3856         }
3857
3858         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
3859                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3860                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3861                                              -1, i) < 0)
3862                                 break;
3863                 }
3864         }
3865 }
3866
3867 /*
3868  * Must not be invoked with interrupt sources disabled and
3869  * the hardware shut down.
3870  */
3871 static void tg3_free_consistent(struct tg3 *tp)
3872 {
3873         if (tp->rx_std_buffers) {
3874                 kfree(tp->rx_std_buffers);
3875                 tp->rx_std_buffers = NULL;
3876         }
3877         if (tp->rx_std) {
3878                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3879                                     tp->rx_std, tp->rx_std_mapping);
3880                 tp->rx_std = NULL;
3881         }
3882         if (tp->rx_jumbo) {
3883                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3884                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3885                 tp->rx_jumbo = NULL;
3886         }
3887         if (tp->rx_rcb) {
3888                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3889                                     tp->rx_rcb, tp->rx_rcb_mapping);
3890                 tp->rx_rcb = NULL;
3891         }
3892         if (tp->tx_ring) {
3893                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3894                         tp->tx_ring, tp->tx_desc_mapping);
3895                 tp->tx_ring = NULL;
3896         }
3897         if (tp->hw_status) {
3898                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3899                                     tp->hw_status, tp->status_mapping);
3900                 tp->hw_status = NULL;
3901         }
3902         if (tp->hw_stats) {
3903                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3904                                     tp->hw_stats, tp->stats_mapping);
3905                 tp->hw_stats = NULL;
3906         }
3907 }
3908
3909 /*
3910  * Must not be invoked with interrupt sources disabled and
3911  * the hardware shut down.  Can sleep.
3912  */
3913 static int tg3_alloc_consistent(struct tg3 *tp)
3914 {
3915         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3916                                       (TG3_RX_RING_SIZE +
3917                                        TG3_RX_JUMBO_RING_SIZE)) +
3918                                      (sizeof(struct tx_ring_info) *
3919                                       TG3_TX_RING_SIZE),
3920                                      GFP_KERNEL);
3921         if (!tp->rx_std_buffers)
3922                 return -ENOMEM;
3923
3924         memset(tp->rx_std_buffers, 0,
3925                (sizeof(struct ring_info) *
3926                 (TG3_RX_RING_SIZE +
3927                  TG3_RX_JUMBO_RING_SIZE)) +
3928                (sizeof(struct tx_ring_info) *
3929                 TG3_TX_RING_SIZE));
3930
3931         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3932         tp->tx_buffers = (struct tx_ring_info *)
3933                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3934
3935         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3936                                           &tp->rx_std_mapping);
3937         if (!tp->rx_std)
3938                 goto err_out;
3939
3940         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3941                                             &tp->rx_jumbo_mapping);
3942
3943         if (!tp->rx_jumbo)
3944                 goto err_out;
3945
3946         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3947                                           &tp->rx_rcb_mapping);
3948         if (!tp->rx_rcb)
3949                 goto err_out;
3950
3951         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3952                                            &tp->tx_desc_mapping);
3953         if (!tp->tx_ring)
3954                 goto err_out;
3955
3956         tp->hw_status = pci_alloc_consistent(tp->pdev,
3957                                              TG3_HW_STATUS_SIZE,
3958                                              &tp->status_mapping);
3959         if (!tp->hw_status)
3960                 goto err_out;
3961
3962         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3963                                             sizeof(struct tg3_hw_stats),
3964                                             &tp->stats_mapping);
3965         if (!tp->hw_stats)
3966                 goto err_out;
3967
3968         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3969         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3970
3971         return 0;
3972
3973 err_out:
3974         tg3_free_consistent(tp);
3975         return -ENOMEM;
3976 }
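/* All three software ring_info arrays come from the single kmalloc()
 * above: rx_std_buffers is the base, rx_jumbo_buffers starts after
 * the TG3_RX_RING_SIZE std entries, and tx_buffers follows the jumbo
 * entries.  That is why tg3_free_consistent() only kfree()s
 * rx_std_buffers.
 */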
3977
3978 #define MAX_WAIT_CNT 1000
3979
3980 /* To stop a block, clear the enable bit and poll till it
3981  * clears.  tp->lock is held.
3982  */
3983 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
3984 {
3985         unsigned int i;
3986         u32 val;
3987
3988         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
3989                 switch (ofs) {
3990                 case RCVLSC_MODE:
3991                 case DMAC_MODE:
3992                 case MBFREE_MODE:
3993                 case BUFMGR_MODE:
3994                 case MEMARB_MODE:
3995                         /* We can't enable/disable these bits of the
3996                          * 5705/5750, just say success.
3997                          */
3998                         return 0;
3999
4000                 default:
4001                         break;
4002                 }
4003         }
4004
4005         val = tr32(ofs);
4006         val &= ~enable_bit;
4007         tw32_f(ofs, val);
4008
4009         for (i = 0; i < MAX_WAIT_CNT; i++) {
4010                 udelay(100);
4011                 val = tr32(ofs);
4012                 if ((val & enable_bit) == 0)
4013                         break;
4014         }
4015
4016         if (i == MAX_WAIT_CNT && !silent) {
4017                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4018                        "ofs=%lx enable_bit=%x\n",
4019                        ofs, enable_bit);
4020                 return -ENODEV;
4021         }
4022
4023         return 0;
4024 }
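/* The polling loop above bounds the wait at MAX_WAIT_CNT * 100us,
 * i.e. roughly 100ms, before reporting the block as stuck.
 */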
4025
4026 /* tp->lock is held. */
4027 static int tg3_abort_hw(struct tg3 *tp, int silent)
4028 {
4029         int i, err;
4030
4031         tg3_disable_ints(tp);
4032
4033         tp->rx_mode &= ~RX_MODE_ENABLE;
4034         tw32_f(MAC_RX_MODE, tp->rx_mode);
4035         udelay(10);
4036
4037         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4038         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4039         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4040         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4041         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4042         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4043
4044         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4045         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4046         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4047         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4048         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4049         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4050         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4051
4052         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4053         tw32_f(MAC_MODE, tp->mac_mode);
4054         udelay(40);
4055
4056         tp->tx_mode &= ~TX_MODE_ENABLE;
4057         tw32_f(MAC_TX_MODE, tp->tx_mode);
4058
4059         for (i = 0; i < MAX_WAIT_CNT; i++) {
4060                 udelay(100);
4061                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4062                         break;
4063         }
4064         if (i >= MAX_WAIT_CNT) {
4065                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4066                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4067                        tp->dev->name, tr32(MAC_TX_MODE));
4068                 err |= -ENODEV;
4069         }
4070
4071         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4072         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4073         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4074
4075         tw32(FTQ_RESET, 0xffffffff);
4076         tw32(FTQ_RESET, 0x00000000);
4077
4078         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4079         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4080
4081         if (tp->hw_status)
4082                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4083         if (tp->hw_stats)
4084                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4085
4086         return err;
4087 }
4088
4089 /* tp->lock is held. */
4090 static int tg3_nvram_lock(struct tg3 *tp)
4091 {
4092         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4093                 int i;
4094
4095                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4096                 for (i = 0; i < 8000; i++) {
4097                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4098                                 break;
4099                         udelay(20);
4100                 }
4101                 if (i == 8000)
4102                         return -ENODEV;
4103         }
4104         return 0;
4105 }
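/* The arbitration wait above is bounded: 8000 polls of 20us give the
 * hardware roughly 160ms to assert SWARB_GNT1 before -ENODEV is
 * returned.
 */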
4106
4107 /* tp->lock is held. */
4108 static void tg3_nvram_unlock(struct tg3 *tp)
4109 {
4110         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4111                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4112 }
4113
4114 /* tp->lock is held. */
4115 static void tg3_enable_nvram_access(struct tg3 *tp)
4116 {
4117         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4118             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4119                 u32 nvaccess = tr32(NVRAM_ACCESS);
4120
4121                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4122         }
4123 }
4124
4125 /* tp->lock is held. */
4126 static void tg3_disable_nvram_access(struct tg3 *tp)
4127 {
4128         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4129             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4130                 u32 nvaccess = tr32(NVRAM_ACCESS);
4131
4132                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4133         }
4134 }
4135
4136 /* tp->lock is held. */
4137 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4138 {
4139         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4140                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4141                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4142
4143         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4144                 switch (kind) {
4145                 case RESET_KIND_INIT:
4146                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4147                                       DRV_STATE_START);
4148                         break;
4149
4150                 case RESET_KIND_SHUTDOWN:
4151                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4152                                       DRV_STATE_UNLOAD);
4153                         break;
4154
4155                 case RESET_KIND_SUSPEND:
4156                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4157                                       DRV_STATE_SUSPEND);
4158                         break;
4159
4160                 default:
4161                         break;
4162                 }
4163         }
4164 }
4165
4166 /* tp->lock is held. */
4167 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4168 {
4169         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4170                 switch (kind) {
4171                 case RESET_KIND_INIT:
4172                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4173                                       DRV_STATE_START_DONE);
4174                         break;
4175
4176                 case RESET_KIND_SHUTDOWN:
4177                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4178                                       DRV_STATE_UNLOAD_DONE);
4179                         break;
4180
4181                 default:
4182                         break;
4183                 }
4184         }
4185 }
4186
4187 /* tp->lock is held. */
4188 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4189 {
4190         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4191                 switch (kind) {
4192                 case RESET_KIND_INIT:
4193                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4194                                       DRV_STATE_START);
4195                         break;
4196
4197                 case RESET_KIND_SHUTDOWN:
4198                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4199                                       DRV_STATE_UNLOAD);
4200                         break;
4201
4202                 case RESET_KIND_SUSPEND:
4203                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4204                                       DRV_STATE_SUSPEND);
4205                         break;
4206
4207                 default:
4208                         break;
4209                 }
4210         }
4211 }
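
/* The three signature helpers above implement the driver/firmware mailbox
 * handshake used around a chip reset: tg3_write_sig_pre_reset() posts the
 * firmware magic plus a DRV_STATE_* value before the reset,
 * tg3_write_sig_post_reset() posts the matching *_DONE state afterwards,
 * and tg3_write_sig_legacy() covers ASF firmware that predates the new
 * handshake.  tg3_halt() below issues them in exactly that order around
 * tg3_chip_reset().
 */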
4212
4213 static void tg3_stop_fw(struct tg3 *);
4214
4215 /* tp->lock is held. */
4216 static int tg3_chip_reset(struct tg3 *tp)
4217 {
4218         u32 val;
4219         u32 flags_save;
4220         int i;
4221
4222         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4223                 tg3_nvram_lock(tp);
4224
4225         /*
4226          * We must avoid the readl() that normally takes place.
4227          * It can lock up machines, cause machine checks, and trigger
4228          * other fun things.  So, temporarily disable the 5701
4229          * hardware workaround while we do the reset.
4230          */
4231         flags_save = tp->tg3_flags;
4232         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
4233
4234         /* do the reset */
4235         val = GRC_MISC_CFG_CORECLK_RESET;
4236
4237         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4238                 if (tr32(0x7e2c) == 0x60) {
4239                         tw32(0x7e2c, 0x20);
4240                 }
4241                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4242                         tw32(GRC_MISC_CFG, (1 << 29));
4243                         val |= (1 << 29);
4244                 }
4245         }
4246
4247         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4248                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4249         tw32(GRC_MISC_CFG, val);
4250
4251         /* restore 5701 hardware bug workaround flag */
4252         tp->tg3_flags = flags_save;
4253
4254         /* Unfortunately, we have to delay before the PCI read back.
4255          * Some 575X chips will not even respond to a PCI cfg access
4256          * when the reset command is given to the chip.
4257          *
4258          * How do these hardware designers expect things to work
4259          * properly if the PCI write is posted for a long period
4260          * of time?  It is always necessary to have some method by
4261          * which a register read back can occur to push the write
4262          * out which does the reset.
4263          *
4264          * For most tg3 variants the trick below has worked.
4265          * Ho hum...
4266          */
4267         udelay(120);
4268
4269         /* Flush PCI posted writes.  The normal MMIO registers
4270          * are inaccessible at this time so this is the only
4271          * way to do this reliably (actually, this is no longer
4272          * the case, see above).  I tried to use indirect
4273          * register read/write but this upset some 5701 variants.
4274          */
4275         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4276
4277         udelay(120);
4278
4279         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4280                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4281                         int i;
4282                         u32 cfg_val;
4283
4284                         /* Wait for link training to complete.  */
4285                         for (i = 0; i < 5000; i++)
4286                                 udelay(100);
4287
4288                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4289                         pci_write_config_dword(tp->pdev, 0xc4,
4290                                                cfg_val | (1 << 15));
4291                 }
4292                 /* Set PCIE max payload size and clear error status.  */
4293                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4294         }
4295
4296         /* Re-enable indirect register accesses. */
4297         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4298                                tp->misc_host_ctrl);
4299
4300         /* Set MAX PCI retry to zero. */
4301         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4302         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4303             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4304                 val |= PCISTATE_RETRY_SAME_DMA;
4305         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4306
4307         pci_restore_state(tp->pdev);
4308
4309         /* Make sure PCI-X relaxed ordering bit is clear. */
4310         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4311         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4312         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4313
4314         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
4315                 u32 val;
4316
4317                 /* Chip reset on 5780 will reset MSI enable bit,
4318                  * so we need to restore it.
4319                  */
4320                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4321                         u16 ctrl;
4322
4323                         pci_read_config_word(tp->pdev,
4324                                              tp->msi_cap + PCI_MSI_FLAGS,
4325                                              &ctrl);
4326                         pci_write_config_word(tp->pdev,
4327                                               tp->msi_cap + PCI_MSI_FLAGS,
4328                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4329                         val = tr32(MSGINT_MODE);
4330                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4331                 }
4332
4333                 val = tr32(MEMARB_MODE);
4334                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4335
4336         } else
4337                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4338
4339         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4340                 tg3_stop_fw(tp);
4341                 tw32(0x5000, 0x400);
4342         }
4343
4344         tw32(GRC_MODE, tp->grc_mode);
4345
4346         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4347                 u32 val = tr32(0xc4);
4348
4349                 tw32(0xc4, val | (1 << 15));
4350         }
4351
4352         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4353             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4354                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4355                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4356                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4357                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4358         }
4359
4360         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4361                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4362                 tw32_f(MAC_MODE, tp->mac_mode);
4363         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4364                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4365                 tw32_f(MAC_MODE, tp->mac_mode);
4366         } else
4367                 tw32_f(MAC_MODE, 0);
4368         udelay(40);
4369
4370         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4371                 /* Wait for firmware initialization to complete. */
4372                 for (i = 0; i < 100000; i++) {
4373                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4374                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4375                                 break;
4376                         udelay(10);
4377                 }
4378                 if (i >= 100000) {
4379                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4380                                "firmware will not restart magic=%08x\n",
4381                                tp->dev->name, val);
4382                         return -ENODEV;
4383                 }
4384         }
4385
4386         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4387             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4388                 u32 val = tr32(0x7c00);
4389
4390                 tw32(0x7c00, val | (1 << 25));
4391         }
4392
4393         /* Reprobe ASF enable state.  */
4394         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4395         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4396         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4397         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4398                 u32 nic_cfg;
4399
4400                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4401                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4402                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4403                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4404                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4405                 }
4406         }
4407
4408         return 0;
4409 }
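
/* tg3_chip_reset() summary: grab the NVRAM lock (unless this is a SUN
 * onboard part), temporarily drop the 5701 register-write workaround,
 * issue GRC_MISC_CFG_CORECLK_RESET (with PCI Express and GPHY-power
 * tweaks where needed), wait out the posted write, restore PCI config
 * state and indirect access, re-enable the memory arbiter and MSI if in
 * use, and finally wait for the bootcode to repost the firmware magic
 * before reprobing the ASF configuration.
 */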
4410
4411 /* tp->lock is held. */
4412 static void tg3_stop_fw(struct tg3 *tp)
4413 {
4414         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4415                 u32 val;
4416                 int i;
4417
4418                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4419                 val = tr32(GRC_RX_CPU_EVENT);
4420                 val |= (1 << 14);
4421                 tw32(GRC_RX_CPU_EVENT, val);
4422
4423                 /* Wait for RX cpu to ACK the event.  */
4424                 for (i = 0; i < 100; i++) {
4425                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4426                                 break;
4427                         udelay(1);
4428                 }
4429         }
4430 }
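
/* tg3_stop_fw() implements the ASF pause handshake: it posts
 * FWCMD_NICDRV_PAUSE_FW into the firmware command mailbox, raises bit 14
 * of GRC_RX_CPU_EVENT to signal the RX CPU, and then polls for up to
 * 100 usec (100 x 1 usec) waiting for the firmware to acknowledge by
 * clearing that bit.  A timeout here is silently ignored.
 */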
4431
4432 /* tp->lock is held. */
4433 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4434 {
4435         int err;
4436
4437         tg3_stop_fw(tp);
4438
4439         tg3_write_sig_pre_reset(tp, kind);
4440
4441         tg3_abort_hw(tp, silent);
4442         err = tg3_chip_reset(tp);
4443
4444         tg3_write_sig_legacy(tp, kind);
4445         tg3_write_sig_post_reset(tp, kind);
4446
4447         if (err)
4448                 return err;
4449
4450         return 0;
4451 }
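
/* tg3_halt() is the full shutdown path: stop the ASF firmware, write the
 * pre-reset signature, quiesce the DMA/MAC blocks via tg3_abort_hw(),
 * perform the chip reset, and finally report the new driver state through
 * both the legacy and new-handshake signature writes.  Note that the
 * tg3_abort_hw() result is intentionally ignored; only the chip reset's
 * return value is propagated.
 */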
4452
4453 #define TG3_FW_RELEASE_MAJOR    0x0
4454 #define TG3_FW_RELEASE_MINOR    0x0
4455 #define TG3_FW_RELEASE_FIX      0x0
4456 #define TG3_FW_START_ADDR       0x08000000
4457 #define TG3_FW_TEXT_ADDR        0x08000000
4458 #define TG3_FW_TEXT_LEN         0x9c0
4459 #define TG3_FW_RODATA_ADDR      0x080009c0
4460 #define TG3_FW_RODATA_LEN       0x60
4461 #define TG3_FW_DATA_ADDR        0x08000a40
4462 #define TG3_FW_DATA_LEN         0x20
4463 #define TG3_FW_SBSS_ADDR        0x08000a60
4464 #define TG3_FW_SBSS_LEN         0xc
4465 #define TG3_FW_BSS_ADDR         0x08000a70
4466 #define TG3_FW_BSS_LEN          0x10
4467
4468 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4469         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4470         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4471         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4472         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4473         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4474         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4475         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4476         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4477         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4478         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4479         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4480         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4481         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4482         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4483         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4484         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4485         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4486         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4487         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4488         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4489         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4490         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4491         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4492         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4493         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4494         0, 0, 0, 0, 0, 0,
4495         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4496         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4497         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4498         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4499         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4500         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4501         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4502         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4503         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4504         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4505         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4506         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4507         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4508         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4509         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4510         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4511         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4512         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4513         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4514         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4515         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4516         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4517         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4518         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4519         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4520         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4521         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4522         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4523         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4524         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4525         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4526         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4527         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4528         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4529         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4530         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4531         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4532         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4533         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4534         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4535         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4536         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4537         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4538         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4539         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4540         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4541         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4542         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4543         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4544         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4545         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4546         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4547         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4548         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4549         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4550         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4551         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4552         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4553         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4554         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4555         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4556         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4557         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4558         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4559         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4560 };
4561
4562 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4563         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4564         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4565         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4566         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4567         0x00000000
4568 };
4569
4570 #if 0 /* All zeros, don't eat up space with it. */
4571 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4572         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4573         0x00000000, 0x00000000, 0x00000000, 0x00000000
4574 };
4575 #endif
4576
4577 #define RX_CPU_SCRATCH_BASE     0x30000
4578 #define RX_CPU_SCRATCH_SIZE     0x04000
4579 #define TX_CPU_SCRATCH_BASE     0x34000
4580 #define TX_CPU_SCRATCH_SIZE     0x04000
4581
4582 /* tp->lock is held. */
4583 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4584 {
4585         int i;
4586
4587         if (offset == TX_CPU_BASE &&
4588             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4589                 BUG();
4590
4591         if (offset == RX_CPU_BASE) {
4592                 for (i = 0; i < 10000; i++) {
4593                         tw32(offset + CPU_STATE, 0xffffffff);
4594                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4595                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4596                                 break;
4597                 }
4598
4599                 tw32(offset + CPU_STATE, 0xffffffff);
4600                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4601                 udelay(10);
4602         } else {
4603                 for (i = 0; i < 10000; i++) {
4604                         tw32(offset + CPU_STATE, 0xffffffff);
4605                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4606                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4607                                 break;
4608                 }
4609         }
4610
4611         if (i >= 10000) {
4612                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4613                        "and %s CPU\n",
4614                        tp->dev->name,
4615                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4616                 return -ENODEV;
4617         }
4618         return 0;
4619 }
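
/* A CPU is halted by repeatedly writing CPU_MODE_HALT (up to 10000
 * attempts) until the mode register reads back with the halt bit set; the
 * RX CPU additionally gets one final forced halt plus a 10 usec settle.
 * Halting the TX CPU on 5705-and-newer chips is treated as a driver bug
 * (BUG()), and tg3_load_firmware_cpu() below rejects the same case with
 * -EINVAL.
 */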
4620
4621 struct fw_info {
4622         unsigned int text_base;
4623         unsigned int text_len;
4624         u32 *text_data;
4625         unsigned int rodata_base;
4626         unsigned int rodata_len;
4627         u32 *rodata_data;
4628         unsigned int data_base;
4629         unsigned int data_len;
4630         u32 *data_data;
4631 };
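
/* Each firmware image in this file is described by filling in a struct
 * fw_info from the TG3_*_ADDR/LEN constants above it; a NULL *_data
 * pointer means "zero-fill that section".  A minimal sketch, mirroring
 * what tg3_load_5701_a0_firmware_fix() does below:
 *
 *	struct fw_info info = {
 *		.text_base   = TG3_FW_TEXT_ADDR,
 *		.text_len    = TG3_FW_TEXT_LEN,
 *		.text_data   = &tg3FwText[0],
 *		.rodata_base = TG3_FW_RODATA_ADDR,
 *		.rodata_len  = TG3_FW_RODATA_LEN,
 *		.rodata_data = &tg3FwRodata[0],
 *		.data_base   = TG3_FW_DATA_ADDR,
 *		.data_len    = TG3_FW_DATA_LEN,
 *		.data_data   = NULL,
 *	};
 */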
4632
4633 /* tp->lock is held. */
4634 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4635                                  int cpu_scratch_size, struct fw_info *info)
4636 {
4637         int err, i;
4638         u32 orig_tg3_flags = tp->tg3_flags;
4639         void (*write_op)(struct tg3 *, u32, u32);
4640
4641         if (cpu_base == TX_CPU_BASE &&
4642             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4643                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4644                        "TX cpu firmware on %s which is 5705.\n",
4645                        tp->dev->name);
4646                 return -EINVAL;
4647         }
4648
4649         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4650                 write_op = tg3_write_mem;
4651         else
4652                 write_op = tg3_write_indirect_reg32;
4653
4654         /* Force use of PCI config space for indirect register
4655          * write calls.
4656          */
4657         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4658
4659         /* It is possible that bootcode is still loading at this point.
4660          * Get the nvram lock before halting the cpu.
4661          */
4662         tg3_nvram_lock(tp);
4663         err = tg3_halt_cpu(tp, cpu_base);
4664         tg3_nvram_unlock(tp);
4665         if (err)
4666                 goto out;
4667
4668         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4669                 write_op(tp, cpu_scratch_base + i, 0);
4670         tw32(cpu_base + CPU_STATE, 0xffffffff);
4671         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4672         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4673                 write_op(tp, (cpu_scratch_base +
4674                               (info->text_base & 0xffff) +
4675                               (i * sizeof(u32))),
4676                          (info->text_data ?
4677                           info->text_data[i] : 0));
4678         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4679                 write_op(tp, (cpu_scratch_base +
4680                               (info->rodata_base & 0xffff) +
4681                               (i * sizeof(u32))),
4682                          (info->rodata_data ?
4683                           info->rodata_data[i] : 0));
4684         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4685                 write_op(tp, (cpu_scratch_base +
4686                               (info->data_base & 0xffff) +
4687                               (i * sizeof(u32))),
4688                          (info->data_data ?
4689                           info->data_data[i] : 0));
4690
4691         err = 0;
4692
4693 out:
4694         tp->tg3_flags = orig_tg3_flags;
4695         return err;
4696 }
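
/* The loader above works in five steps: pick a write routine (direct
 * memory writes on 5705-plus, indirect register writes otherwise), force
 * PCI config space for indirect accesses via TG3_FLAG_PCIX_TARGET_HWBUG,
 * halt the target CPU under the NVRAM lock, zero the whole scratch area,
 * and then copy the text/rodata/data sections using the low 16 bits of
 * each section's load address as the offset into CPU scratch memory.
 * The original flags are restored on the way out.
 */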
4697
4698 /* tp->lock is held. */
4699 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4700 {
4701         struct fw_info info;
4702         int err, i;
4703
4704         info.text_base = TG3_FW_TEXT_ADDR;
4705         info.text_len = TG3_FW_TEXT_LEN;
4706         info.text_data = &tg3FwText[0];
4707         info.rodata_base = TG3_FW_RODATA_ADDR;
4708         info.rodata_len = TG3_FW_RODATA_LEN;
4709         info.rodata_data = &tg3FwRodata[0];
4710         info.data_base = TG3_FW_DATA_ADDR;
4711         info.data_len = TG3_FW_DATA_LEN;
4712         info.data_data = NULL;
4713
4714         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4715                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4716                                     &info);
4717         if (err)
4718                 return err;
4719
4720         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4721                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4722                                     &info);
4723         if (err)
4724                 return err;
4725
4726         /* Now start up only the RX cpu. */
4727         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4728         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4729
4730         for (i = 0; i < 5; i++) {
4731                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4732                         break;
4733                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4734                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4735                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4736                 udelay(1000);
4737         }
4738         if (i >= 5) {
4739                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
4740                        "to set RX CPU PC, is %08x should be %08x\n",
4741                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4742                        TG3_FW_TEXT_ADDR);
4743                 return -ENODEV;
4744         }
4745         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4746         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4747
4748         return 0;
4749 }
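
/* After loading both CPUs, only the RX CPU is released: its program
 * counter is set to TG3_FW_TEXT_ADDR and re-checked up to five times,
 * 1 ms apart, before CPU_MODE is cleared to let it run.  If the PC never
 * sticks, the probe fails with -ENODEV.
 */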
4750
4751 #if TG3_TSO_SUPPORT != 0
4752
4753 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4754 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4755 #define TG3_TSO_FW_RELEASE_FIX          0x0
4756 #define TG3_TSO_FW_START_ADDR           0x08000000
4757 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4758 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4759 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4760 #define TG3_TSO_FW_RODATA_LEN           0x60
4761 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4762 #define TG3_TSO_FW_DATA_LEN             0x30
4763 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4764 #define TG3_TSO_FW_SBSS_LEN             0x2c
4765 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4766 #define TG3_TSO_FW_BSS_LEN              0x894
4767
4768 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4769         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4770         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4771         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4772         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4773         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4774         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4775         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4776         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4777         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4778         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4779         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4780         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4781         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4782         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4783         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4784         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4785         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4786         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4787         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4788         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4789         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4790         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4791         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4792         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4793         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4794         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4795         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4796         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4797         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4798         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4799         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4800         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4801         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4802         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4803         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4804         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4805         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4806         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4807         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4808         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4809         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4810         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4811         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4812         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4813         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4814         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4815         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4816         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4817         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4818         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4819         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4820         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4821         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4822         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4823         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4824         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4825         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4826         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4827         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4828         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4829         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4830         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4831         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4832         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4833         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4834         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4835         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4836         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4837         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4838         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4839         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4840         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4841         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4842         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4843         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4844         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4845         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4846         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4847         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4848         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4849         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4850         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4851         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4852         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4853         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4854         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4855         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4856         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4857         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4858         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4859         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4860         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4861         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4862         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4863         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4864         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4865         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4866         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4867         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4868         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4869         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4870         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4871         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4872         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4873         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4874         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4875         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4876         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4877         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4878         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4879         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4880         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4881         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4882         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4883         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4884         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4885         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4886         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4887         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4888         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4889         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4890         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4891         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4892         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4893         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4894         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4895         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4896         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4897         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4898         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4899         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4900         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4901         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4902         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4903         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4904         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4905         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4906         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4907         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4908         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4909         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4910         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4911         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4912         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4913         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4914         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4915         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4916         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4917         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4918         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4919         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4920         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4921         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4922         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4923         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4924         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4925         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4926         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4927         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4928         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4929         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4930         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4931         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4932         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4933         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4934         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4935         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4936         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4937         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4938         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4939         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4940         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4941         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4942         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4943         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4944         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4945         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4946         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4947         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4948         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4949         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4950         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4951         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4952         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4953         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4954         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4955         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4956         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4957         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4958         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4959         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4960         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4961         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4962         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4963         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4964         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4965         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4966         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4967         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4968         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4969         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4970         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4971         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4972         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4973         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4974         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4975         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4976         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4977         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4978         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4979         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4980         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4981         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4982         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4983         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4984         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4985         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4986         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4987         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4988         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4989         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4990         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4991         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4992         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4993         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4994         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4995         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4996         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4997         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4998         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4999         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5000         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5001         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5002         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5003         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5004         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5005         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5006         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5007         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5008         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5009         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5010         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5011         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5012         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5013         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5014         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5015         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5016         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5017         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5018         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5019         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5020         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5021         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5022         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5023         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5024         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5025         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5026         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5027         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5028         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5029         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5030         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5031         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5032         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5033         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5034         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5035         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5036         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5037         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5038         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5039         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5040         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5041         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5042         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5043         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5044         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5045         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5046         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5047         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5048         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5049         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5050         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5051         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5052         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5053 };
5054
5055 static u32 tg3TsoFwRodata[] = {
5056         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5057         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5058         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5059         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5060         0x00000000,
5061 };
5062
5063 static u32 tg3TsoFwData[] = {
5064         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5065         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5066         0x00000000,
5067 };
5068
5069 /* 5705 needs a special version of the TSO firmware.  */
5070 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5071 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
5072 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5073 #define TG3_TSO5_FW_START_ADDR          0x00010000
5074 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5075 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5076 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5077 #define TG3_TSO5_FW_RODATA_LEN          0x50
5078 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5079 #define TG3_TSO5_FW_DATA_LEN            0x20
5080 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5081 #define TG3_TSO5_FW_SBSS_LEN            0x28
5082 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5083 #define TG3_TSO5_FW_BSS_LEN             0x88
5084
5085 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5086         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5087         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5088         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5089         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5090         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5091         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5092         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5093         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5094         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5095         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5096         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5097         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5098         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5099         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5100         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5101         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5102         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5103         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5104         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5105         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5106         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5107         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5108         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5109         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5110         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5111         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5112         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5113         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5114         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5115         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5116         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5117         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5118         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5119         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5120         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5121         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5122         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5123         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5124         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5125         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5126         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5127         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5128         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5129         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5130         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5131         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5132         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5133         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5134         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5135         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5136         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5137         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5138         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5139         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5140         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5141         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5142         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5143         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5144         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5145         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5146         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5147         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5148         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5149         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5150         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5151         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5152         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5153         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5154         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5155         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5156         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5157         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5158         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5159         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5160         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5161         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5162         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5163         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5164         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5165         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5166         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5167         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5168         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5169         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5170         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5171         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5172         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5173         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5174         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5175         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5176         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5177         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5178         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5179         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5180         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5181         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5182         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5183         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5184         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5185         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5186         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5187         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5188         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5189         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5190         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5191         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5192         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5193         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5194         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5195         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5196         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5197         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5198         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5199         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5200         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5201         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5202         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5203         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5204         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5205         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5206         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5207         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5208         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5209         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5210         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5211         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5212         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5213         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5214         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5215         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5216         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5217         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5218         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5219         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5220         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5221         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5222         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5223         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5224         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5225         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5226         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5227         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5228         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5229         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5230         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5231         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5232         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5233         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5234         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5235         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5236         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5237         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5238         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5239         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5240         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5241         0x00000000, 0x00000000, 0x00000000,
5242 };
5243
5244 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5245         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5246         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5247         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5248         0x00000000, 0x00000000, 0x00000000,
5249 };
5250
5251 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5252         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5253         0x00000000, 0x00000000, 0x00000000,
5254 };
5255
5256 /* tp->lock is held. */
5257 static int tg3_load_tso_firmware(struct tg3 *tp)
5258 {
5259         struct fw_info info;
5260         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5261         int err, i;
5262
5263         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5264                 return 0;
5265
5266         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5267                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5268                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5269                 info.text_data = &tg3Tso5FwText[0];
5270                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5271                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5272                 info.rodata_data = &tg3Tso5FwRodata[0];
5273                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5274                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5275                 info.data_data = &tg3Tso5FwData[0];
5276                 cpu_base = RX_CPU_BASE;
5277                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5278                 cpu_scratch_size = (info.text_len +
5279                                     info.rodata_len +
5280                                     info.data_len +
5281                                     TG3_TSO5_FW_SBSS_LEN +
5282                                     TG3_TSO5_FW_BSS_LEN);
5283         } else {
5284                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5285                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5286                 info.text_data = &tg3TsoFwText[0];
5287                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5288                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5289                 info.rodata_data = &tg3TsoFwRodata[0];
5290                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5291                 info.data_len = TG3_TSO_FW_DATA_LEN;
5292                 info.data_data = &tg3TsoFwData[0];
5293                 cpu_base = TX_CPU_BASE;
5294                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5295                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5296         }
5297
5298         err = tg3_load_firmware_cpu(tp, cpu_base,
5299                                     cpu_scratch_base, cpu_scratch_size,
5300                                     &info);
5301         if (err)
5302                 return err;
5303
5304         /* Now startup the cpu. */
5305         tw32(cpu_base + CPU_STATE, 0xffffffff);
5306         tw32_f(cpu_base + CPU_PC,    info.text_base);
5307
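        /* Verify that the new program counter value latched; retry a few
         * times, halting the CPU between attempts, before giving up.
         */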
5308         for (i = 0; i < 5; i++) {
5309                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5310                         break;
5311                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5312                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5313                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5314                 udelay(1000);
5315         }
5316         if (i >= 5) {
5317                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5318                        "to set CPU PC: is %08x, should be %08x\n",
5319                        tp->dev->name, tr32(cpu_base + CPU_PC),
5320                        info.text_base);
5321                 return -ENODEV;
5322         }
5323         tw32(cpu_base + CPU_STATE, 0xffffffff);
5324         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5325         return 0;
5326 }
5327
5328 #endif /* TG3_TSO_SUPPORT != 0 */
5329
5330 /* tp->lock is held. */
5331 static void __tg3_set_mac_addr(struct tg3 *tp)
5332 {
5333         u32 addr_high, addr_low;
5334         int i;
5335
5336         addr_high = ((tp->dev->dev_addr[0] << 8) |
5337                      tp->dev->dev_addr[1]);
5338         addr_low = ((tp->dev->dev_addr[2] << 24) |
5339                     (tp->dev->dev_addr[3] << 16) |
5340                     (tp->dev->dev_addr[4] <<  8) |
5341                     (tp->dev->dev_addr[5] <<  0));
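        /* Program the same station address into all four MAC address
         * register slots.
         */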
5342         for (i = 0; i < 4; i++) {
5343                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5344                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5345         }
5346
5347         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5348             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5349                 for (i = 0; i < 12; i++) {
5350                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5351                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5352                 }
5353         }
5354
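        /* Seed the transmit backoff algorithm with the byte sum of the
         * MAC address.
         */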
5355         addr_high = (tp->dev->dev_addr[0] +
5356                      tp->dev->dev_addr[1] +
5357                      tp->dev->dev_addr[2] +
5358                      tp->dev->dev_addr[3] +
5359                      tp->dev->dev_addr[4] +
5360                      tp->dev->dev_addr[5]) &
5361                 TX_BACKOFF_SEED_MASK;
5362         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5363 }
5364
5365 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5366 {
5367         struct tg3 *tp = netdev_priv(dev);
5368         struct sockaddr *addr = p;
5369
5370         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5371
5372         spin_lock_bh(&tp->lock);
5373         __tg3_set_mac_addr(tp);
5374         spin_unlock_bh(&tp->lock);
5375
5376         return 0;
5377 }
5378
5379 /* tp->lock is held. */
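/* Write one TG3_BDINFO block into NIC memory: the 64-bit host DMA address
 * of the ring, its maxlen/flags word and, on chips without the 5705_PLUS
 * flag, the NIC SRAM address of the descriptors.
 */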
5380 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5381                            dma_addr_t mapping, u32 maxlen_flags,
5382                            u32 nic_addr)
5383 {
5384         tg3_write_mem(tp,
5385                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5386                       ((u64) mapping >> 32));
5387         tg3_write_mem(tp,
5388                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5389                       ((u64) mapping & 0xffffffff));
5390         tg3_write_mem(tp,
5391                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5392                        maxlen_flags);
5393
5394         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5395                 tg3_write_mem(tp,
5396                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5397                               nic_addr);
5398 }
5399
5400 static void __tg3_set_rx_mode(struct net_device *);
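/* Program the host coalescing tick and max-frame thresholds from the
 * ethtool_coalesce parameters.  The per-interrupt tick registers and the
 * statistics block interval are only written on chips without the
 * 5705_PLUS flag.
 */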
5401 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5402 {
5403         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5404         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5405         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5406         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5407         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5408                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5409                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5410         }
5411         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5412         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5413         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5414                 u32 val = ec->stats_block_coalesce_usecs;
5415
5416                 if (!netif_carrier_ok(tp->dev))
5417                         val = 0;
5418
5419                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5420         }
5421 }
5422
5423 /* tp->lock is held. */
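/* Bring the chip from reset to a fully programmed state: stop the
 * firmware, reset the core, then reinitialize the rings, buffer manager,
 * DMA engines, MAC and host coalescing engine before kicking off PHY
 * setup.
 */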
5424 static int tg3_reset_hw(struct tg3 *tp)
5425 {
5426         u32 val, rdmac_mode;
5427         int i, err, limit;
5428
5429         tg3_disable_ints(tp);
5430
5431         tg3_stop_fw(tp);
5432
5433         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5434
5435         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5436                 tg3_abort_hw(tp, 1);
5437         }
5438
5439         err = tg3_chip_reset(tp);
5440         if (err)
5441                 return err;
5442
5443         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5444
5445         /* This works around an issue with Athlon chipsets on
5446          * B3 tigon3 silicon.  This bit has no effect on any
5447          * other revision.  But do not set this on PCI Express
5448          * chips.
5449          */
5450         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5451                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5452         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5453
5454         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5455             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5456                 val = tr32(TG3PCI_PCISTATE);
5457                 val |= PCISTATE_RETRY_SAME_DMA;
5458                 tw32(TG3PCI_PCISTATE, val);
5459         }
5460
5461         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5462                 /* Enable some hw fixes.  */
5463                 val = tr32(TG3PCI_MSI_DATA);
5464                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5465                 tw32(TG3PCI_MSI_DATA, val);
5466         }
5467
5468         /* Descriptor ring init may make accesses to the
5469          * NIC SRAM area to setup the TX descriptors, so we
5470          * can only do this after the hardware has been
5471          * successfully reset.
5472          */
5473         tg3_init_rings(tp);
5474
5475         /* This value is determined during the probe time DMA
5476          * engine test, tg3_test_dma.
5477          */
5478         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5479
5480         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5481                           GRC_MODE_4X_NIC_SEND_RINGS |
5482                           GRC_MODE_NO_TX_PHDR_CSUM |
5483                           GRC_MODE_NO_RX_PHDR_CSUM);
5484         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5485         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5486                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5487         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5488                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5489
5490         tw32(GRC_MODE,
5491              tp->grc_mode |
5492              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5493
5494         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
5495         val = tr32(GRC_MISC_CFG);
5496         val &= ~0xff;
5497         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5498         tw32(GRC_MISC_CFG, val);
5499
5500         /* Initialize MBUF/DESC pool. */
5501         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5502                 /* Do nothing.  */
5503         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5504                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5505                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5506                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5507                 else
5508                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5509                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5510                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5511         }
5512 #if TG3_TSO_SUPPORT != 0
5513         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5514                 int fw_len;
5515
5516                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5517                           TG3_TSO5_FW_RODATA_LEN +
5518                           TG3_TSO5_FW_DATA_LEN +
5519                           TG3_TSO5_FW_SBSS_LEN +
5520                           TG3_TSO5_FW_BSS_LEN);
5521                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5522                 tw32(BUFMGR_MB_POOL_ADDR,
5523                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5524                 tw32(BUFMGR_MB_POOL_SIZE,
5525                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5526         }
5527 #endif
5528
5529         if (tp->dev->mtu <= ETH_DATA_LEN) {
5530                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5531                      tp->bufmgr_config.mbuf_read_dma_low_water);
5532                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5533                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5534                 tw32(BUFMGR_MB_HIGH_WATER,
5535                      tp->bufmgr_config.mbuf_high_water);
5536         } else {
5537                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5538                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5539                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5540                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5541                 tw32(BUFMGR_MB_HIGH_WATER,
5542                      tp->bufmgr_config.mbuf_high_water_jumbo);
5543         }
5544         tw32(BUFMGR_DMA_LOW_WATER,
5545              tp->bufmgr_config.dma_low_water);
5546         tw32(BUFMGR_DMA_HIGH_WATER,
5547              tp->bufmgr_config.dma_high_water);
5548
5549         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
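        /* Poll up to 2000 * 10us for the buffer manager to report enabled. */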
5550         for (i = 0; i < 2000; i++) {
5551                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5552                         break;
5553                 udelay(10);
5554         }
5555         if (i >= 2000) {
5556                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5557                        tp->dev->name);
5558                 return -ENODEV;
5559         }
5560
5561         /* Setup replenish threshold. */
5562         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5563
5564         /* Initialize TG3_BDINFO's at:
5565          *  RCVDBDI_STD_BD:     standard eth size rx ring
5566          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5567          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5568          *
5569          * like so:
5570          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5571          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5572          *                              ring attribute flags
5573          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5574          *
5575          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5576          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5577          *
5578          * The size of each ring is fixed in the firmware, but the location is
5579          * configurable.
5580          */
5581         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5582              ((u64) tp->rx_std_mapping >> 32));
5583         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5584              ((u64) tp->rx_std_mapping & 0xffffffff));
5585         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5586              NIC_SRAM_RX_BUFFER_DESC);
5587
5588         /* Don't even try to program the JUMBO/MINI buffer descriptor
5589          * configs on 5705.
5590          */
5591         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5592                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5593                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5594         } else {
5595                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5596                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5597
5598                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5599                      BDINFO_FLAGS_DISABLED);
5600
5601                 /* Setup replenish threshold. */
5602                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5603
5604                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5605                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5606                              ((u64) tp->rx_jumbo_mapping >> 32));
5607                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5608                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5609                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5610                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5611                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5612                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5613                 } else {
5614                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5615                              BDINFO_FLAGS_DISABLED);
5616                 }
5617
5618         }
5619
5620         /* There is only one send ring on 5705/5750, so there is no need to
5621          * explicitly disable the others.
5622          */
5623         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5624                 /* Clear out send RCB ring in SRAM. */
5625                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5626                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5627                                       BDINFO_FLAGS_DISABLED);
5628         }
5629
5630         tp->tx_prod = 0;
5631         tp->tx_cons = 0;
5632         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5633         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5634
5635         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5636                        tp->tx_desc_mapping,
5637                        (TG3_TX_RING_SIZE <<
5638                         BDINFO_FLAGS_MAXLEN_SHIFT),
5639                        NIC_SRAM_TX_BUFFER_DESC);
5640
5641         /* There is only one receive return ring on 5705/5750, so there is
5642          * no need to explicitly disable the others.
5643          */
5644         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5645                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5646                      i += TG3_BDINFO_SIZE) {
5647                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5648                                       BDINFO_FLAGS_DISABLED);
5649                 }
5650         }
5651
5652         tp->rx_rcb_ptr = 0;
5653         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5654
5655         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5656                        tp->rx_rcb_mapping,
5657                        (TG3_RX_RCB_RING_SIZE(tp) <<
5658                         BDINFO_FLAGS_MAXLEN_SHIFT),
5659                        0);
5660
5661         tp->rx_std_ptr = tp->rx_pending;
5662         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5663                      tp->rx_std_ptr);
5664
5665         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
5666                                                 tp->rx_jumbo_pending : 0;
5667         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5668                      tp->rx_jumbo_ptr);
5669
5670         /* Initialize MAC address and backoff seed. */
5671         __tg3_set_mac_addr(tp);
5672
5673         /* MTU + ethernet header + FCS + optional VLAN tag */
5674         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5675
5676         /* The slot time is changed by tg3_setup_phy if we
5677          * run at gigabit with half duplex.
5678          */
5679         tw32(MAC_TX_LENGTHS,
5680              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5681              (6 << TX_LENGTHS_IPG_SHIFT) |
5682              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5683
5684         /* Receive rules. */
5685         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5686         tw32(RCVLPC_CONFIG, 0x0181);
5687
5688         /* Calculate the RDMAC_MODE setting early; we need it to determine
5689          * the RCVLPC_STATS_ENABLE mask.
5690          */
5691         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5692                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5693                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5694                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5695                       RDMAC_MODE_LNGREAD_ENAB);
5696         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5697                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5698
5699         /* If statement applies to 5705 and 5750 PCI devices only */
5700         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5701              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5702             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5703                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5704                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5705                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5706                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5707                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5708                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5709                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5710                 }
5711         }
5712
5713         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5714                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5715
5716 #if TG3_TSO_SUPPORT != 0
5717         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5718                 rdmac_mode |= (1 << 27);
5719 #endif
5720
5721         /* Receive/send statistics. */
5722         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5723             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5724                 val = tr32(RCVLPC_STATS_ENABLE);
5725                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5726                 tw32(RCVLPC_STATS_ENABLE, val);
5727         } else {
5728                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5729         }
5730         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5731         tw32(SNDDATAI_STATSENAB, 0xffffff);
5732         tw32(SNDDATAI_STATSCTRL,
5733              (SNDDATAI_SCTRL_ENABLE |
5734               SNDDATAI_SCTRL_FASTUPD));
5735
5736         /* Setup host coalescing engine. */
5737         tw32(HOSTCC_MODE, 0);
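        /* Wait for the coalescing engine to report disabled before
         * reprogramming it.
         */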
5738         for (i = 0; i < 2000; i++) {
5739                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5740                         break;
5741                 udelay(10);
5742         }
5743
5744         __tg3_set_coalesce(tp, &tp->coal);
5745
5746         /* set status block DMA address */
5747         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5748              ((u64) tp->status_mapping >> 32));
5749         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5750              ((u64) tp->status_mapping & 0xffffffff));
5751
5752         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5753                 /* Status/statistics block address.  See tg3_timer,
5754                  * the tg3_periodic_fetch_stats call there, and
5755                  * tg3_get_stats to see how this works for 5705/5750 chips.
5756                  */
5757                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5758                      ((u64) tp->stats_mapping >> 32));
5759                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5760                      ((u64) tp->stats_mapping & 0xffffffff));
5761                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5762                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5763         }
5764
5765         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5766
5767         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5768         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5769         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5770                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5771
5772         /* Clear statistics/status block in chip, and status block in ram. */
5773         for (i = NIC_SRAM_STATS_BLK;
5774              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5775              i += sizeof(u32)) {
5776                 tg3_write_mem(tp, i, 0);
5777                 udelay(40);
5778         }
5779         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5780
5781         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5782                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5783         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5784         udelay(40);
5785
5786         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5787          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5788          * register to preserve the GPIO settings for LOMs. The GPIOs,
5789          * whether used as inputs or outputs, are set by boot code after
5790          * reset.
5791          */
5792         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
5793                 u32 gpio_mask;
5794
5795                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
5796                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
5797
5798                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
5799                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
5800                                      GRC_LCLCTRL_GPIO_OUTPUT3;
5801
5802                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
5803
5804                 /* GPIO1 must be driven high for eeprom write protect */
5805                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5806                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5807         }
5808         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5809         udelay(100);
5810
5811         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5812         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
5813         tp->last_tag = 0;
5814
5815         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5816                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5817                 udelay(40);
5818         }
5819
5820         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5821                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5822                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5823                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5824                WDMAC_MODE_LNGREAD_ENAB);
5825
5826         /* If statement applies to 5705 and 5750 PCI devices only */
5827         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5828              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5829             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5830                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5831                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5832                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5833                         /* nothing */
5834                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5835                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5836                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5837                         val |= WDMAC_MODE_RX_ACCEL;
5838                 }
5839         }
5840
5841         tw32_f(WDMAC_MODE, val);
5842         udelay(40);
5843
5844         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5845                 val = tr32(TG3PCI_X_CAPS);
5846                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5847                         val &= ~PCIX_CAPS_BURST_MASK;
5848                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5849                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5850                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5851                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5852                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5853                                 val |= (tp->split_mode_max_reqs <<
5854                                         PCIX_CAPS_SPLIT_SHIFT);
5855                 }
5856                 tw32(TG3PCI_X_CAPS, val);
5857         }
5858
5859         tw32_f(RDMAC_MODE, rdmac_mode);
5860         udelay(40);
5861
5862         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5863         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5864                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5865         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5866         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5867         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5868         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5869         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5870 #if TG3_TSO_SUPPORT != 0
5871         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5872                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5873 #endif
5874         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5875         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5876
5877         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5878                 err = tg3_load_5701_a0_firmware_fix(tp);
5879                 if (err)
5880                         return err;
5881         }
5882
5883 #if TG3_TSO_SUPPORT != 0
5884         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5885                 err = tg3_load_tso_firmware(tp);
5886                 if (err)
5887                         return err;
5888         }
5889 #endif
5890
5891         tp->tx_mode = TX_MODE_ENABLE;
5892         tw32_f(MAC_TX_MODE, tp->tx_mode);
5893         udelay(100);
5894
5895         tp->rx_mode = RX_MODE_ENABLE;
5896         tw32_f(MAC_RX_MODE, tp->rx_mode);
5897         udelay(10);
5898
5899         if (tp->link_config.phy_is_low_power) {
5900                 tp->link_config.phy_is_low_power = 0;
5901                 tp->link_config.speed = tp->link_config.orig_speed;
5902                 tp->link_config.duplex = tp->link_config.orig_duplex;
5903                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5904         }
5905
5906         tp->mi_mode = MAC_MI_MODE_BASE;
5907         tw32_f(MAC_MI_MODE, tp->mi_mode);
5908         udelay(80);
5909
5910         tw32(MAC_LED_CTRL, tp->led_ctrl);
5911
5912         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5913         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5914                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5915                 udelay(10);
5916         }
5917         tw32_f(MAC_RX_MODE, tp->rx_mode);
5918         udelay(10);
5919
5920         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5921                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5922                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5923                         /* Set drive transmission level to 1.2V, but
5924                          * only if the signal pre-emphasis bit is not set.  */
5925                         val = tr32(MAC_SERDES_CFG);
5926                         val &= 0xfffff000;
5927                         val |= 0x880;
5928                         tw32(MAC_SERDES_CFG, val);
5929                 }
5930                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5931                         tw32(MAC_SERDES_CFG, 0x616000);
5932         }
5933
5934         /* Prevent chip from dropping frames when flow control
5935          * is enabled.
5936          */
5937         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5938
5939         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5940             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5941                 /* Use hardware link auto-negotiation */
5942                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5943         }
5944
5945         err = tg3_setup_phy(tp, 1);
5946         if (err)
5947                 return err;
5948
5949         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5950                 u32 tmp;
5951
5952                 /* Clear CRC stats. */
5953                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5954                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
5955                         tg3_readphy(tp, 0x14, &tmp);
5956                 }
5957         }
5958
5959         __tg3_set_rx_mode(tp->dev);
5960
5961         /* Initialize receive rules. */
5962         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5963         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5964         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5965         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5966
5967         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
5968             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780))
5969                 limit = 8;
5970         else
5971                 limit = 16;
5972         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5973                 limit -= 4;
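        /* This switch intentionally falls through, clearing receive rules
         * (limit - 1) down through rule 4.  Rules 0 and 1 were programmed
         * above; rules 2 and 3 are left untouched.
         */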
5974         switch (limit) {
5975         case 16:
5976                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5977         case 15:
5978                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5979         case 14:
5980                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5981         case 13:
5982                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5983         case 12:
5984                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5985         case 11:
5986                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5987         case 10:
5988                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5989         case 9:
5990                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5991         case 8:
5992                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5993         case 7:
5994                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5995         case 6:
5996                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5997         case 5:
5998                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5999         case 4:
6000                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6001         case 3:
6002                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6003         case 2:
6004         case 1:
6005
6006         default:
6007                 break;
6008         }
6009
6010         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6011
6012         return 0;
6013 }
6014
6015 /* Called at device open time to get the chip ready for
6016  * packet processing.  Invoked with tp->lock held.
6017  */
6018 static int tg3_init_hw(struct tg3 *tp)
6019 {
6020         int err;
6021
6022         /* Force the chip into D0. */
6023         err = tg3_set_power_state(tp, 0);
6024         if (err)
6025                 goto out;
6026
6027         tg3_switch_clocks(tp);
6028
6029         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6030
6031         err = tg3_reset_hw(tp);
6032
6033 out:
6034         return err;
6035 }
6036
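/* Add a 32-bit hardware counter into a 64-bit (high/low) statistic,
 * carrying into the high word when the low word overflows.
 */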
6037 #define TG3_STAT_ADD32(PSTAT, REG) \
6038 do {    u32 __val = tr32(REG); \
6039         (PSTAT)->low += __val; \
6040         if ((PSTAT)->low < __val) \
6041                 (PSTAT)->high += 1; \
6042 } while (0)
6043
6044 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6045 {
6046         struct tg3_hw_stats *sp = tp->hw_stats;
6047
6048         if (!netif_carrier_ok(tp->dev))
6049                 return;
6050
6051         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6052         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6053         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6054         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6055         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6056         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6057         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6058         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6059         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6060         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6061         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6062         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6063         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6064
6065         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6066         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6067         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6068         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6069         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6070         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6071         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6072         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6073         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6074         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6075         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6076         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6077         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6078         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6079 }
6080
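/* Periodic driver timer: when tagged status is not in use, nudge the
 * interrupt path and schedule a reset if the write DMA engine has shut
 * itself off; once a second, poll statistics and link state; and
 * periodically send the ASF heartbeat to the firmware.
 */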
6081 static void tg3_timer(unsigned long __opaque)
6082 {
6083         struct tg3 *tp = (struct tg3 *) __opaque;
6084
6085         spin_lock(&tp->lock);
6086
6087         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6088                 /* All of this garbage is because, when using non-tagged
6089                  * IRQ status, the mailbox/status_block protocol the chip
6090                  * uses with the CPU is race prone.
6091                  */
6092                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6093                         tw32(GRC_LOCAL_CTRL,
6094                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6095                 } else {
6096                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6097                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6098                 }
6099
6100                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6101                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6102                         spin_unlock(&tp->lock);
6103                         schedule_work(&tp->reset_task);
6104                         return;
6105                 }
6106         }
6107
6108         /* This part only runs once per second. */
6109         if (!--tp->timer_counter) {
6110                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6111                         tg3_periodic_fetch_stats(tp);
6112
6113                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6114                         u32 mac_stat;
6115                         int phy_event;
6116
6117                         mac_stat = tr32(MAC_STATUS);
6118
6119                         phy_event = 0;
6120                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6121                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6122                                         phy_event = 1;
6123                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6124                                 phy_event = 1;
6125
6126                         if (phy_event)
6127                                 tg3_setup_phy(tp, 0);
6128                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6129                         u32 mac_stat = tr32(MAC_STATUS);
6130                         int need_setup = 0;
6131
6132                         if (netif_carrier_ok(tp->dev) &&
6133                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6134                                 need_setup = 1;
6135                         }
6136                         if (! netif_carrier_ok(tp->dev) &&
6137                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6138                                          MAC_STATUS_SIGNAL_DET))) {
6139                                 need_setup = 1;
6140                         }
6141                         if (need_setup) {
6142                                 tw32_f(MAC_MODE,
6143                                      (tp->mac_mode &
6144                                       ~MAC_MODE_PORT_MODE_MASK));
6145                                 udelay(40);
6146                                 tw32_f(MAC_MODE, tp->mac_mode);
6147                                 udelay(40);
6148                                 tg3_setup_phy(tp, 0);
6149                         }
6150                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6151                         tg3_serdes_parallel_detect(tp);
6152
6153                 tp->timer_counter = tp->timer_multiplier;
6154         }
6155
6156         /* Heartbeat is only sent once every 120 seconds.  */
6157         if (!--tp->asf_counter) {
6158                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6159                         u32 val;
6160
6161                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
6162                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6163                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
6164                         val = tr32(GRC_RX_CPU_EVENT);
6165                         val |= (1 << 14);
6166                         tw32(GRC_RX_CPU_EVENT, val);
6167                 }
6168                 tp->asf_counter = tp->asf_multiplier;
6169         }
6170
6171         spin_unlock(&tp->lock);
6172
6173         tp->timer.expires = jiffies + tp->timer_offset;
6174         add_timer(&tp->timer);
6175 }
6176
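/* Fire a test interrupt by forcing a host coalescing "now" event and poll
 * the interrupt mailbox to confirm that one was delivered.  The normal
 * interrupt handler is reinstalled before returning.
 */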
6177 static int tg3_test_interrupt(struct tg3 *tp)
6178 {
6179         struct net_device *dev = tp->dev;
6180         int err, i;
6181         u32 int_mbox = 0;
6182
6183         if (!netif_running(dev))
6184                 return -ENODEV;
6185
6186         tg3_disable_ints(tp);
6187
6188         free_irq(tp->pdev->irq, dev);
6189
6190         err = request_irq(tp->pdev->irq, tg3_test_isr,
6191                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6192         if (err)
6193                 return err;
6194
6195         tg3_enable_ints(tp);
6196
6197         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6198                HOSTCC_MODE_NOW);
6199
6200         for (i = 0; i < 5; i++) {
6201                 int_mbox = tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
6202                 if (int_mbox != 0)
6203                         break;
6204                 msleep(10);
6205         }
6206
6207         tg3_disable_ints(tp);
6208
6209         free_irq(tp->pdev->irq, dev);
6210         
6211         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6212                 err = request_irq(tp->pdev->irq, tg3_msi,
6213                                   SA_SAMPLE_RANDOM, dev->name, dev);
6214         else {
6215                 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6216                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6217                         fn = tg3_interrupt_tagged;
6218                 err = request_irq(tp->pdev->irq, fn,
6219                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6220         }
6221
6222         if (err)
6223                 return err;
6224
6225         if (int_mbox != 0)
6226                 return 0;
6227
6228         return -EIO;
6229 }
6230
6231 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
6232  * mode is successfully restored.
6233  */
6234 static int tg3_test_msi(struct tg3 *tp)
6235 {
6236         struct net_device *dev = tp->dev;
6237         int err;
6238         u16 pci_cmd;
6239
6240         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6241                 return 0;
6242
6243         /* Turn off SERR reporting in case MSI terminates with Master
6244          * Abort.
6245          */
6246         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6247         pci_write_config_word(tp->pdev, PCI_COMMAND,
6248                               pci_cmd & ~PCI_COMMAND_SERR);
6249
6250         err = tg3_test_interrupt(tp);
6251
6252         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6253
6254         if (!err)
6255                 return 0;
6256
6257         /* other failures */
6258         if (err != -EIO)
6259                 return err;
6260
6261         /* MSI test failed, go back to INTx mode */
6262         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6263                "switching to INTx mode. Please report this failure to "
6264                "the PCI maintainer and include system chipset information.\n",
6265                        tp->dev->name);
6266
6267         free_irq(tp->pdev->irq, dev);
6268         pci_disable_msi(tp->pdev);
6269
6270         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6271
6272         {
6273                 irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
6274                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6275                         fn = tg3_interrupt_tagged;
6276
6277                 err = request_irq(tp->pdev->irq, fn,
6278                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6279         }
6280         if (err)
6281                 return err;
6282
6283         /* Need to reset the chip because the MSI cycle may have terminated
6284          * with Master Abort.
6285          */
6286         tg3_full_lock(tp, 1);
6287
6288         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6289         err = tg3_init_hw(tp);
6290
6291         tg3_full_unlock(tp);
6292
6293         if (err)
6294                 free_irq(tp->pdev->irq, dev);
6295
6296         return err;
6297 }
6298
6299 static int tg3_open(struct net_device *dev)
6300 {
6301         struct tg3 *tp = netdev_priv(dev);
6302         int err;
6303
6304         tg3_full_lock(tp, 0);
6305
6306         tg3_disable_ints(tp);
6307         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6308
6309         tg3_full_unlock(tp);
6310
6311         /* The placement of this call is tied
6312          * to the setup and use of Host TX descriptors.
6313          */
6314         err = tg3_alloc_consistent(tp);
6315         if (err)
6316                 return err;
6317
6318         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6319             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6320             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
6321                 /* All MSI supporting chips should support tagged
6322                  * status.  Warn and skip MSI if that is not the case.
6323                  */
6324                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6325                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6326                                "Not using MSI.\n", tp->dev->name);
6327                 } else if (pci_enable_msi(tp->pdev) == 0) {
6328                         u32 msi_mode;
6329
6330                         msi_mode = tr32(MSGINT_MODE);
6331                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6332                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6333                 }
6334         }
6335         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
6336                 err = request_irq(tp->pdev->irq, tg3_msi,
6337                                   SA_SAMPLE_RANDOM, dev->name, dev);
6338         else {
6339                 irqreturn_t (*fn)(int, void *, struct pt_regs *) = tg3_interrupt;
6340                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6341                         fn = tg3_interrupt_tagged;
6342
6343                 err = request_irq(tp->pdev->irq, fn,
6344                                   SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6345         }
6346
6347         if (err) {
6348                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6349                         pci_disable_msi(tp->pdev);
6350                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6351                 }
6352                 tg3_free_consistent(tp);
6353                 return err;
6354         }
6355
6356         tg3_full_lock(tp, 0);
6357
6358         err = tg3_init_hw(tp);
6359         if (err) {
6360                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6361                 tg3_free_rings(tp);
6362         } else {
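                /* Hardware init succeeded: set up the periodic driver timer.
                 * With tagged status the timer runs once per second,
                 * otherwise ten times per second, so timer_counter/multiplier
                 * span one second of ticks and asf_counter/multiplier span
                 * 120 seconds of ticks.
                 */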
6363                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6364                         tp->timer_offset = HZ;
6365                 else
6366                         tp->timer_offset = HZ / 10;
6367
6368                 BUG_ON(tp->timer_offset > HZ);
6369                 tp->timer_counter = tp->timer_multiplier =
6370                         (HZ / tp->timer_offset);
6371                 tp->asf_counter = tp->asf_multiplier =
6372                         ((HZ / tp->timer_offset) * 120);
6373
6374                 init_timer(&tp->timer);
6375                 tp->timer.expires = jiffies + tp->timer_offset;
6376                 tp->timer.data = (unsigned long) tp;
6377                 tp->timer.function = tg3_timer;
6378         }
6379
6380         tg3_full_unlock(tp);
6381
6382         if (err) {
6383                 free_irq(tp->pdev->irq, dev);
6384                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6385                         pci_disable_msi(tp->pdev);
6386                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6387                 }
6388                 tg3_free_consistent(tp);
6389                 return err;
6390         }
6391
6392         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6393                 err = tg3_test_msi(tp);
6394
6395                 if (err) {
6396                         tg3_full_lock(tp, 0);
6397
6398                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6399                                 pci_disable_msi(tp->pdev);
6400                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6401                         }
6402                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6403                         tg3_free_rings(tp);
6404                         tg3_free_consistent(tp);
6405
6406                         tg3_full_unlock(tp);
6407
6408                         return err;
6409                 }
6410         }
6411
6412         tg3_full_lock(tp, 0);
6413
6414         add_timer(&tp->timer);
6415         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6416         tg3_enable_ints(tp);
6417
6418         tg3_full_unlock(tp);
6419
6420         netif_start_queue(dev);
6421
6422         return 0;
6423 }
6424
6425 #if 0
6426 /*static*/ void tg3_dump_state(struct tg3 *tp)
6427 {
6428         u32 val32, val32_2, val32_3, val32_4, val32_5;
6429         u16 val16;
6430         int i;
6431
6432         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6433         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6434         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6435                val16, val32);
6436
6437         /* MAC block */
6438         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6439                tr32(MAC_MODE), tr32(MAC_STATUS));
6440         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6441                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6442         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6443                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6444         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6445                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6446
6447         /* Send data initiator control block */
6448         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6449                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6450         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6451                tr32(SNDDATAI_STATSCTRL));
6452
6453         /* Send data completion control block */
6454         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6455
6456         /* Send BD ring selector block */
6457         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6458                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6459
6460         /* Send BD initiator control block */
6461         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6462                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6463
6464         /* Send BD completion control block */
6465         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6466
6467         /* Receive list placement control block */
6468         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6469                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6470         printk("       RCVLPC_STATSCTRL[%08x]\n",
6471                tr32(RCVLPC_STATSCTRL));
6472
6473         /* Receive data and receive BD initiator control block */
6474         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6475                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6476
6477         /* Receive data completion control block */
6478         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6479                tr32(RCVDCC_MODE));
6480
6481         /* Receive BD initiator control block */
6482         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6483                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6484
6485         /* Receive BD completion control block */
6486         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6487                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6488
6489         /* Receive list selector control block */
6490         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6491                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6492
6493         /* Mbuf cluster free block */
6494         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6495                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6496
6497         /* Host coalescing control block */
6498         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6499                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6500         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6501                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6502                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6503         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6504                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6505                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6506         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6507                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6508         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6509                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6510
6511         /* Memory arbiter control block */
6512         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6513                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6514
6515         /* Buffer manager control block */
6516         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6517                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6518         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6519                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6520         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6521                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6522                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6523                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6524
6525         /* Read DMA control block */
6526         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6527                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6528
6529         /* Write DMA control block */
6530         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6531                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6532
6533         /* DMA completion block */
6534         printk("DEBUG: DMAC_MODE[%08x]\n",
6535                tr32(DMAC_MODE));
6536
6537         /* GRC block */
6538         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6539                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6540         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6541                tr32(GRC_LOCAL_CTRL));
6542
6543         /* TG3_BDINFOs */
6544         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6545                tr32(RCVDBDI_JUMBO_BD + 0x0),
6546                tr32(RCVDBDI_JUMBO_BD + 0x4),
6547                tr32(RCVDBDI_JUMBO_BD + 0x8),
6548                tr32(RCVDBDI_JUMBO_BD + 0xc));
6549         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6550                tr32(RCVDBDI_STD_BD + 0x0),
6551                tr32(RCVDBDI_STD_BD + 0x4),
6552                tr32(RCVDBDI_STD_BD + 0x8),
6553                tr32(RCVDBDI_STD_BD + 0xc));
6554         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6555                tr32(RCVDBDI_MINI_BD + 0x0),
6556                tr32(RCVDBDI_MINI_BD + 0x4),
6557                tr32(RCVDBDI_MINI_BD + 0x8),
6558                tr32(RCVDBDI_MINI_BD + 0xc));
6559
6560         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6561         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6562         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6563         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6564         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6565                val32, val32_2, val32_3, val32_4);
6566
6567         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6568         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6569         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6570         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6571         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6572                val32, val32_2, val32_3, val32_4);
6573
6574         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6575         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6576         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6577         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6578         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6579         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6580                val32, val32_2, val32_3, val32_4, val32_5);
6581
6582         /* SW status block */
6583         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6584                tp->hw_status->status,
6585                tp->hw_status->status_tag,
6586                tp->hw_status->rx_jumbo_consumer,
6587                tp->hw_status->rx_consumer,
6588                tp->hw_status->rx_mini_consumer,
6589                tp->hw_status->idx[0].rx_producer,
6590                tp->hw_status->idx[0].tx_consumer);
6591
6592         /* SW statistics block */
6593         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6594                ((u32 *)tp->hw_stats)[0],
6595                ((u32 *)tp->hw_stats)[1],
6596                ((u32 *)tp->hw_stats)[2],
6597                ((u32 *)tp->hw_stats)[3]);
6598
6599         /* Mailboxes */
6600         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6601                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6602                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6603                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6604                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6605
6606         /* NIC side send descriptors. */
6607         for (i = 0; i < 6; i++) {
6608                 unsigned long txd;
6609
6610                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6611                         + (i * sizeof(struct tg3_tx_buffer_desc));
6612                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6613                        i,
6614                        readl(txd + 0x0), readl(txd + 0x4),
6615                        readl(txd + 0x8), readl(txd + 0xc));
6616         }
6617
6618         /* NIC side RX descriptors. */
6619         for (i = 0; i < 6; i++) {
6620                 unsigned long rxd;
6621
6622                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6623                         + (i * sizeof(struct tg3_rx_buffer_desc));
6624                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6625                        i,
6626                        readl(rxd + 0x0), readl(rxd + 0x4),
6627                        readl(rxd + 0x8), readl(rxd + 0xc));
6628                 rxd += (4 * sizeof(u32));
6629                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6630                        i,
6631                        readl(rxd + 0x0), readl(rxd + 0x4),
6632                        readl(rxd + 0x8), readl(rxd + 0xc));
6633         }
6634
6635         for (i = 0; i < 6; i++) {
6636                 unsigned long rxd;
6637
6638                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
6639                         + (i * sizeof(struct tg3_rx_buffer_desc));
6640                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6641                        i,
6642                        readl(rxd + 0x0), readl(rxd + 0x4),
6643                        readl(rxd + 0x8), readl(rxd + 0xc));
6644                 rxd += (4 * sizeof(u32));
6645                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6646                        i,
6647                        readl(rxd + 0x0), readl(rxd + 0x4),
6648                        readl(rxd + 0x8), readl(rxd + 0xc));
6649         }
6650 }
6651 #endif
6652
6653 static struct net_device_stats *tg3_get_stats(struct net_device *);
6654 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6655
6656 static int tg3_close(struct net_device *dev)
6657 {
6658         struct tg3 *tp = netdev_priv(dev);
6659
6660         netif_stop_queue(dev);
6661
6662         del_timer_sync(&tp->timer);
6663
6664         tg3_full_lock(tp, 1);
6665 #if 0
6666         tg3_dump_state(tp);
6667 #endif
6668
6669         tg3_disable_ints(tp);
6670
6671         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6672         tg3_free_rings(tp);
6673         tp->tg3_flags &=
6674                 ~(TG3_FLAG_INIT_COMPLETE |
6675                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6676         netif_carrier_off(tp->dev);
6677
6678         tg3_full_unlock(tp);
6679
6680         free_irq(tp->pdev->irq, dev);
6681         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6682                 pci_disable_msi(tp->pdev);
6683                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6684         }
6685
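        /* Snapshot the hardware counters into the *_prev copies before
         * tg3_free_consistent() releases the DMA-consistent memory backing
         * tp->hw_stats; tg3_get_stats()/tg3_get_estats() add these
         * snapshots to the live counters after the next open.
         */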
6686         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6687                sizeof(tp->net_stats_prev));
6688         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6689                sizeof(tp->estats_prev));
6690
6691         tg3_free_consistent(tp);
6692
6693         return 0;
6694 }
6695
6696 static inline unsigned long get_stat64(tg3_stat64_t *val)
6697 {
6698         unsigned long ret;
6699
6700 #if (BITS_PER_LONG == 32)
6701         ret = val->low;
6702 #else
6703         ret = ((u64)val->high << 32) | ((u64)val->low);
6704 #endif
6705         return ret;
6706 }
6707
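/* On 5700/5701 copper devices the PHY's CRC error counter (register 0x14,
 * read after setting bit 15 of register 0x1e) is accumulated into
 * tp->phy_crc_errors; all other devices use the MAC rx_fcs_errors counter.
 */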
6708 static unsigned long calc_crc_errors(struct tg3 *tp)
6709 {
6710         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6711
6712         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6713             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6714              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6715                 u32 val;
6716
6717                 spin_lock_bh(&tp->lock);
6718                 if (!tg3_readphy(tp, 0x1e, &val)) {
6719                         tg3_writephy(tp, 0x1e, val | 0x8000);
6720                         tg3_readphy(tp, 0x14, &val);
6721                 } else
6722                         val = 0;
6723                 spin_unlock_bh(&tp->lock);
6724
6725                 tp->phy_crc_errors += val;
6726
6727                 return tp->phy_crc_errors;
6728         }
6729
6730         return get_stat64(&hw_stats->rx_fcs_errors);
6731 }
6732
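/* Both the ethtool and net_device statistics are reported as the sum of
 * the snapshot saved at the last tg3_close() (*_prev) and the current
 * hardware counters, so totals are preserved across down/up cycles.
 */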
6733 #define ESTAT_ADD(member) \
6734         estats->member =        old_estats->member + \
6735                                 get_stat64(&hw_stats->member)
6736
6737 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6738 {
6739         struct tg3_ethtool_stats *estats = &tp->estats;
6740         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6741         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6742
6743         if (!hw_stats)
6744                 return old_estats;
6745
6746         ESTAT_ADD(rx_octets);
6747         ESTAT_ADD(rx_fragments);
6748         ESTAT_ADD(rx_ucast_packets);
6749         ESTAT_ADD(rx_mcast_packets);
6750         ESTAT_ADD(rx_bcast_packets);
6751         ESTAT_ADD(rx_fcs_errors);
6752         ESTAT_ADD(rx_align_errors);
6753         ESTAT_ADD(rx_xon_pause_rcvd);
6754         ESTAT_ADD(rx_xoff_pause_rcvd);
6755         ESTAT_ADD(rx_mac_ctrl_rcvd);
6756         ESTAT_ADD(rx_xoff_entered);
6757         ESTAT_ADD(rx_frame_too_long_errors);
6758         ESTAT_ADD(rx_jabbers);
6759         ESTAT_ADD(rx_undersize_packets);
6760         ESTAT_ADD(rx_in_length_errors);
6761         ESTAT_ADD(rx_out_length_errors);
6762         ESTAT_ADD(rx_64_or_less_octet_packets);
6763         ESTAT_ADD(rx_65_to_127_octet_packets);
6764         ESTAT_ADD(rx_128_to_255_octet_packets);
6765         ESTAT_ADD(rx_256_to_511_octet_packets);
6766         ESTAT_ADD(rx_512_to_1023_octet_packets);
6767         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6768         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6769         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6770         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6771         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6772
6773         ESTAT_ADD(tx_octets);
6774         ESTAT_ADD(tx_collisions);
6775         ESTAT_ADD(tx_xon_sent);
6776         ESTAT_ADD(tx_xoff_sent);
6777         ESTAT_ADD(tx_flow_control);
6778         ESTAT_ADD(tx_mac_errors);
6779         ESTAT_ADD(tx_single_collisions);
6780         ESTAT_ADD(tx_mult_collisions);
6781         ESTAT_ADD(tx_deferred);
6782         ESTAT_ADD(tx_excessive_collisions);
6783         ESTAT_ADD(tx_late_collisions);
6784         ESTAT_ADD(tx_collide_2times);
6785         ESTAT_ADD(tx_collide_3times);
6786         ESTAT_ADD(tx_collide_4times);
6787         ESTAT_ADD(tx_collide_5times);
6788         ESTAT_ADD(tx_collide_6times);
6789         ESTAT_ADD(tx_collide_7times);
6790         ESTAT_ADD(tx_collide_8times);
6791         ESTAT_ADD(tx_collide_9times);
6792         ESTAT_ADD(tx_collide_10times);
6793         ESTAT_ADD(tx_collide_11times);
6794         ESTAT_ADD(tx_collide_12times);
6795         ESTAT_ADD(tx_collide_13times);
6796         ESTAT_ADD(tx_collide_14times);
6797         ESTAT_ADD(tx_collide_15times);
6798         ESTAT_ADD(tx_ucast_packets);
6799         ESTAT_ADD(tx_mcast_packets);
6800         ESTAT_ADD(tx_bcast_packets);
6801         ESTAT_ADD(tx_carrier_sense_errors);
6802         ESTAT_ADD(tx_discards);
6803         ESTAT_ADD(tx_errors);
6804
6805         ESTAT_ADD(dma_writeq_full);
6806         ESTAT_ADD(dma_write_prioq_full);
6807         ESTAT_ADD(rxbds_empty);
6808         ESTAT_ADD(rx_discards);
6809         ESTAT_ADD(rx_errors);
6810         ESTAT_ADD(rx_threshold_hit);
6811
6812         ESTAT_ADD(dma_readq_full);
6813         ESTAT_ADD(dma_read_prioq_full);
6814         ESTAT_ADD(tx_comp_queue_full);
6815
6816         ESTAT_ADD(ring_set_send_prod_index);
6817         ESTAT_ADD(ring_status_update);
6818         ESTAT_ADD(nic_irqs);
6819         ESTAT_ADD(nic_avoided_irqs);
6820         ESTAT_ADD(nic_tx_threshold_hit);
6821
6822         return estats;
6823 }
6824
6825 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6826 {
6827         struct tg3 *tp = netdev_priv(dev);
6828         struct net_device_stats *stats = &tp->net_stats;
6829         struct net_device_stats *old_stats = &tp->net_stats_prev;
6830         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6831
6832         if (!hw_stats)
6833                 return old_stats;
6834
6835         stats->rx_packets = old_stats->rx_packets +
6836                 get_stat64(&hw_stats->rx_ucast_packets) +
6837                 get_stat64(&hw_stats->rx_mcast_packets) +
6838                 get_stat64(&hw_stats->rx_bcast_packets);
6839                 
6840         stats->tx_packets = old_stats->tx_packets +
6841                 get_stat64(&hw_stats->tx_ucast_packets) +
6842                 get_stat64(&hw_stats->tx_mcast_packets) +
6843                 get_stat64(&hw_stats->tx_bcast_packets);
6844
6845         stats->rx_bytes = old_stats->rx_bytes +
6846                 get_stat64(&hw_stats->rx_octets);
6847         stats->tx_bytes = old_stats->tx_bytes +
6848                 get_stat64(&hw_stats->tx_octets);
6849
6850         stats->rx_errors = old_stats->rx_errors +
6851                 get_stat64(&hw_stats->rx_errors) +
6852                 get_stat64(&hw_stats->rx_discards);
6853         stats->tx_errors = old_stats->tx_errors +
6854                 get_stat64(&hw_stats->tx_errors) +
6855                 get_stat64(&hw_stats->tx_mac_errors) +
6856                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6857                 get_stat64(&hw_stats->tx_discards);
6858
6859         stats->multicast = old_stats->multicast +
6860                 get_stat64(&hw_stats->rx_mcast_packets);
6861         stats->collisions = old_stats->collisions +
6862                 get_stat64(&hw_stats->tx_collisions);
6863
6864         stats->rx_length_errors = old_stats->rx_length_errors +
6865                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6866                 get_stat64(&hw_stats->rx_undersize_packets);
6867
6868         stats->rx_over_errors = old_stats->rx_over_errors +
6869                 get_stat64(&hw_stats->rxbds_empty);
6870         stats->rx_frame_errors = old_stats->rx_frame_errors +
6871                 get_stat64(&hw_stats->rx_align_errors);
6872         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6873                 get_stat64(&hw_stats->tx_discards);
6874         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6875                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6876
6877         stats->rx_crc_errors = old_stats->rx_crc_errors +
6878                 calc_crc_errors(tp);
6879
6880         return stats;
6881 }
6882
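/* Standard bit-reflected CRC-32 (polynomial 0xedb88320), processed one byte
 * at a time, LSB first, with the final value inverted.  Used for the
 * multicast hash filter and the NVRAM self-test checksums.
 */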
6883 static inline u32 calc_crc(unsigned char *buf, int len)
6884 {
6885         u32 reg;
6886         u32 tmp;
6887         int j, k;
6888
6889         reg = 0xffffffff;
6890
6891         for (j = 0; j < len; j++) {
6892                 reg ^= buf[j];
6893
6894                 for (k = 0; k < 8; k++) {
6895                         tmp = reg & 0x01;
6896
6897                         reg >>= 1;
6898
6899                         if (tmp) {
6900                                 reg ^= 0xedb88320;
6901                         }
6902                 }
6903         }
6904
6905         return ~reg;
6906 }
6907
6908 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6909 {
6910         /* accept or reject all multicast frames */
6911         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6912         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6913         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6914         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6915 }
6916
6917 static void __tg3_set_rx_mode(struct net_device *dev)
6918 {
6919         struct tg3 *tp = netdev_priv(dev);
6920         u32 rx_mode;
6921
6922         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6923                                   RX_MODE_KEEP_VLAN_TAG);
6924
6925         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6926          * flag clear.
6927          */
6928 #if TG3_VLAN_TAG_USED
6929         if (!tp->vlgrp &&
6930             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6931                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6932 #else
6933         /* By definition, VLAN is always disabled in this
6934          * case.
6935          */
6936         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6937                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6938 #endif
6939
6940         if (dev->flags & IFF_PROMISC) {
6941                 /* Promiscuous mode. */
6942                 rx_mode |= RX_MODE_PROMISC;
6943         } else if (dev->flags & IFF_ALLMULTI) {
6944                 /* Accept all multicast. */
6945                 tg3_set_multi(tp, 1);
6946         } else if (dev->mc_count < 1) {
6947                 /* Reject all multicast. */
6948                 tg3_set_multi(tp, 0);
6949         } else {
6950                 /* Accept one or more multicast(s). */
6951                 struct dev_mc_list *mclist;
6952                 unsigned int i;
6953                 u32 mc_filter[4] = { 0, };
6954                 u32 regidx;
6955                 u32 bit;
6956                 u32 crc;
6957
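                /* The inverted low 7 bits of the address CRC select one of
                 * 128 hash bits: bits 6:5 pick one of the four MAC_HASH
                 * registers and bits 4:0 pick the bit within it.
                 */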
6958                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6959                      i++, mclist = mclist->next) {
6960
6961                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
6962                         bit = ~crc & 0x7f;
6963                         regidx = (bit & 0x60) >> 5;
6964                         bit &= 0x1f;
6965                         mc_filter[regidx] |= (1 << bit);
6966                 }
6967
6968                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6969                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6970                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6971                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6972         }
6973
6974         if (rx_mode != tp->rx_mode) {
6975                 tp->rx_mode = rx_mode;
6976                 tw32_f(MAC_RX_MODE, rx_mode);
6977                 udelay(10);
6978         }
6979 }
6980
6981 static void tg3_set_rx_mode(struct net_device *dev)
6982 {
6983         struct tg3 *tp = netdev_priv(dev);
6984
6985         tg3_full_lock(tp, 0);
6986         __tg3_set_rx_mode(dev);
6987         tg3_full_unlock(tp);
6988 }
6989
6990 #define TG3_REGDUMP_LEN         (32 * 1024)
6991
6992 static int tg3_get_regs_len(struct net_device *dev)
6993 {
6994         return TG3_REGDUMP_LEN;
6995 }
6996
6997 static void tg3_get_regs(struct net_device *dev,
6998                 struct ethtool_regs *regs, void *_p)
6999 {
7000         u32 *p = _p;
7001         struct tg3 *tp = netdev_priv(dev);
7002         u8 *orig_p = _p;
7003         int i;
7004
7005         regs->version = 0;
7006
7007         memset(p, 0, TG3_REGDUMP_LEN);
7008
7009         tg3_full_lock(tp, 0);
7010
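/* The dump buffer mirrors the register address space: each helper below
 * repositions the output pointer to orig_p + register offset before copying,
 * so unread register ranges stay zero-filled from the memset above.
 */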
7011 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7012 #define GET_REG32_LOOP(base,len)                \
7013 do {    p = (u32 *)(orig_p + (base));           \
7014         for (i = 0; i < len; i += 4)            \
7015                 __GET_REG32((base) + i);        \
7016 } while (0)
7017 #define GET_REG32_1(reg)                        \
7018 do {    p = (u32 *)(orig_p + (reg));            \
7019         __GET_REG32((reg));                     \
7020 } while (0)
7021
7022         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7023         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7024         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7025         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7026         GET_REG32_1(SNDDATAC_MODE);
7027         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7028         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7029         GET_REG32_1(SNDBDC_MODE);
7030         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7031         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7032         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7033         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7034         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7035         GET_REG32_1(RCVDCC_MODE);
7036         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7037         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7038         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7039         GET_REG32_1(MBFREE_MODE);
7040         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7041         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7042         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7043         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7044         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7045         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
7046         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
7047         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7048         GET_REG32_LOOP(FTQ_RESET, 0x120);
7049         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7050         GET_REG32_1(DMAC_MODE);
7051         GET_REG32_LOOP(GRC_MODE, 0x4c);
7052         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7053                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7054
7055 #undef __GET_REG32
7056 #undef GET_REG32_LOOP
7057 #undef GET_REG32_1
7058
7059         tg3_full_unlock(tp);
7060 }
7061
7062 static int tg3_get_eeprom_len(struct net_device *dev)
7063 {
7064         struct tg3 *tp = netdev_priv(dev);
7065
7066         return tp->nvram_size;
7067 }
7068
7069 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7070
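/* ethtool EEPROM reads go through NVRAM in 32-bit words, so the request is
 * split into an unaligned leading fragment, whole aligned words, and an
 * unaligned trailing fragment; eeprom->len tracks how much was copied.
 */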
7071 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7072 {
7073         struct tg3 *tp = netdev_priv(dev);
7074         int ret;
7075         u8  *pd;
7076         u32 i, offset, len, val, b_offset, b_count;
7077
7078         offset = eeprom->offset;
7079         len = eeprom->len;
7080         eeprom->len = 0;
7081
7082         eeprom->magic = TG3_EEPROM_MAGIC;
7083
7084         if (offset & 3) {
7085                 /* adjustments to start on required 4 byte boundary */
7086                 b_offset = offset & 3;
7087                 b_count = 4 - b_offset;
7088                 if (b_count > len) {
7089                         /* i.e. offset=1 len=2 */
7090                         b_count = len;
7091                 }
7092                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7093                 if (ret)
7094                         return ret;
7095                 val = cpu_to_le32(val);
7096                 memcpy(data, ((char*)&val) + b_offset, b_count);
7097                 len -= b_count;
7098                 offset += b_count;
7099                 eeprom->len += b_count;
7100         }
7101
7102         /* read bytes up to the last 4 byte boundary */
7103         pd = &data[eeprom->len];
7104         for (i = 0; i < (len - (len & 3)); i += 4) {
7105                 ret = tg3_nvram_read(tp, offset + i, &val);
7106                 if (ret) {
7107                         eeprom->len += i;
7108                         return ret;
7109                 }
7110                 val = cpu_to_le32(val);
7111                 memcpy(pd + i, &val, 4);
7112         }
7113         eeprom->len += i;
7114
7115         if (len & 3) {
7116                 /* read last bytes not ending on 4 byte boundary */
7117                 pd = &data[eeprom->len];
7118                 b_count = len & 3;
7119                 b_offset = offset + len - b_count;
7120                 ret = tg3_nvram_read(tp, b_offset, &val);
7121                 if (ret)
7122                         return ret;
7123                 val = cpu_to_le32(val);
7124                 memcpy(pd, ((char*)&val), b_count);
7125                 eeprom->len += b_count;
7126         }
7127         return 0;
7128 }
7129
7130 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7131
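/* NVRAM writes must also be 32-bit aligned: an unaligned start or length is
 * widened to whole words by reading back the bounding NVRAM words and merging
 * the caller's data into a temporary buffer before tg3_nvram_write_block().
 */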
7132 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7133 {
7134         struct tg3 *tp = netdev_priv(dev);
7135         int ret;
7136         u32 offset, len, b_offset, odd_len, start, end;
7137         u8 *buf;
7138
7139         if (eeprom->magic != TG3_EEPROM_MAGIC)
7140                 return -EINVAL;
7141
7142         offset = eeprom->offset;
7143         len = eeprom->len;
7144
7145         if ((b_offset = (offset & 3))) {
7146                 /* adjustments to start on required 4 byte boundary */
7147                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7148                 if (ret)
7149                         return ret;
7150                 start = cpu_to_le32(start);
7151                 len += b_offset;
7152                 offset &= ~3;
7153                 if (len < 4)
7154                         len = 4;
7155         }
7156
7157         odd_len = 0;
7158         if (len & 3) {
7159                 /* adjustments to end on required 4 byte boundary */
7160                 odd_len = 1;
7161                 len = (len + 3) & ~3;
7162                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7163                 if (ret)
7164                         return ret;
7165                 end = cpu_to_le32(end);
7166         }
7167
7168         buf = data;
7169         if (b_offset || odd_len) {
7170                 buf = kmalloc(len, GFP_KERNEL);
7171                 if (!buf)
7172                         return -ENOMEM;
7173                 if (b_offset)
7174                         memcpy(buf, &start, 4);
7175                 if (odd_len)
7176                         memcpy(buf+len-4, &end, 4);
7177                 memcpy(buf + b_offset, data, eeprom->len);
7178         }
7179
7180         ret = tg3_nvram_write_block(tp, offset, len, buf);
7181
7182         if (buf != data)
7183                 kfree(buf);
7184
7185         return ret;
7186 }
7187
7188 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7189 {
7190         struct tg3 *tp = netdev_priv(dev);
7191   
7192         cmd->supported = (SUPPORTED_Autoneg);
7193
7194         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7195                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7196                                    SUPPORTED_1000baseT_Full);
7197
7198         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
7199                 cmd->supported |= (SUPPORTED_100baseT_Half |
7200                                   SUPPORTED_100baseT_Full |
7201                                   SUPPORTED_10baseT_Half |
7202                                   SUPPORTED_10baseT_Full |
7203                                   SUPPORTED_MII);
7204         else
7205                 cmd->supported |= SUPPORTED_FIBRE;
7206   
7207         cmd->advertising = tp->link_config.advertising;
7208         if (netif_running(dev)) {
7209                 cmd->speed = tp->link_config.active_speed;
7210                 cmd->duplex = tp->link_config.active_duplex;
7211         }
7212         cmd->port = 0;
7213         cmd->phy_address = PHY_ADDR;
7214         cmd->transceiver = 0;
7215         cmd->autoneg = tp->link_config.autoneg;
7216         cmd->maxtxpkt = 0;
7217         cmd->maxrxpkt = 0;
7218         return 0;
7219 }
7220   
7221 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7222 {
7223         struct tg3 *tp = netdev_priv(dev);
7224   
7225         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7226                 /* These are the only advertisement bits allowed.  */
7227                 if (cmd->autoneg == AUTONEG_ENABLE &&
7228                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7229                                           ADVERTISED_1000baseT_Full |
7230                                           ADVERTISED_Autoneg |
7231                                           ADVERTISED_FIBRE)))
7232                         return -EINVAL;
7233         }
7234
7235         tg3_full_lock(tp, 0);
7236
7237         tp->link_config.autoneg = cmd->autoneg;
7238         if (cmd->autoneg == AUTONEG_ENABLE) {
7239                 tp->link_config.advertising = cmd->advertising;
7240                 tp->link_config.speed = SPEED_INVALID;
7241                 tp->link_config.duplex = DUPLEX_INVALID;
7242         } else {
7243                 tp->link_config.advertising = 0;
7244                 tp->link_config.speed = cmd->speed;
7245                 tp->link_config.duplex = cmd->duplex;
7246         }
7247   
7248         if (netif_running(dev))
7249                 tg3_setup_phy(tp, 1);
7250
7251         tg3_full_unlock(tp);
7252   
7253         return 0;
7254 }
7255   
7256 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7257 {
7258         struct tg3 *tp = netdev_priv(dev);
7259   
7260         strcpy(info->driver, DRV_MODULE_NAME);
7261         strcpy(info->version, DRV_MODULE_VERSION);
7262         strcpy(info->bus_info, pci_name(tp->pdev));
7263 }
7264   
7265 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7266 {
7267         struct tg3 *tp = netdev_priv(dev);
7268   
7269         wol->supported = WAKE_MAGIC;
7270         wol->wolopts = 0;
7271         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7272                 wol->wolopts = WAKE_MAGIC;
7273         memset(&wol->sopass, 0, sizeof(wol->sopass));
7274 }
7275   
7276 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7277 {
7278         struct tg3 *tp = netdev_priv(dev);
7279   
7280         if (wol->wolopts & ~WAKE_MAGIC)
7281                 return -EINVAL;
7282         if ((wol->wolopts & WAKE_MAGIC) &&
7283             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7284             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7285                 return -EINVAL;
7286   
7287         spin_lock_bh(&tp->lock);
7288         if (wol->wolopts & WAKE_MAGIC)
7289                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7290         else
7291                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7292         spin_unlock_bh(&tp->lock);
7293   
7294         return 0;
7295 }
7296   
7297 static u32 tg3_get_msglevel(struct net_device *dev)
7298 {
7299         struct tg3 *tp = netdev_priv(dev);
7300         return tp->msg_enable;
7301 }
7302   
7303 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7304 {
7305         struct tg3 *tp = netdev_priv(dev);
7306         tp->msg_enable = value;
7307 }
7308   
7309 #if TG3_TSO_SUPPORT != 0
7310 static int tg3_set_tso(struct net_device *dev, u32 value)
7311 {
7312         struct tg3 *tp = netdev_priv(dev);
7313
7314         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7315                 if (value)
7316                         return -EINVAL;
7317                 return 0;
7318         }
7319         return ethtool_op_set_tso(dev, value);
7320 }
7321 #endif
7322   
7323 static int tg3_nway_reset(struct net_device *dev)
7324 {
7325         struct tg3 *tp = netdev_priv(dev);
7326         u32 bmcr;
7327         int r;
7328   
7329         if (!netif_running(dev))
7330                 return -EAGAIN;
7331
7332         spin_lock_bh(&tp->lock);
7333         r = -EINVAL;
7334         tg3_readphy(tp, MII_BMCR, &bmcr);
7335         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7336             (bmcr & BMCR_ANENABLE)) {
7337                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
7338                 r = 0;
7339         }
7340         spin_unlock_bh(&tp->lock);
7341   
7342         return r;
7343 }
7344   
7345 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7346 {
7347         struct tg3 *tp = netdev_priv(dev);
7348   
7349         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7350         ering->rx_mini_max_pending = 0;
7351         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7352
7353         ering->rx_pending = tp->rx_pending;
7354         ering->rx_mini_pending = 0;
7355         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7356         ering->tx_pending = tp->tx_pending;
7357 }
7358   
7359 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7360 {
7361         struct tg3 *tp = netdev_priv(dev);
7362         int irq_sync = 0;
7363   
7364         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7365             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7366             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7367                 return -EINVAL;
7368   
7369         if (netif_running(dev)) {
7370                 tg3_netif_stop(tp);
7371                 irq_sync = 1;
7372         }
7373
7374         tg3_full_lock(tp, irq_sync);
7375   
7376         tp->rx_pending = ering->rx_pending;
7377
7378         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7379             tp->rx_pending > 63)
7380                 tp->rx_pending = 63;
7381         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7382         tp->tx_pending = ering->tx_pending;
7383
7384         if (netif_running(dev)) {
7385                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7386                 tg3_init_hw(tp);
7387                 tg3_netif_start(tp);
7388         }
7389
7390         tg3_full_unlock(tp);
7391   
7392         return 0;
7393 }
7394   
7395 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7396 {
7397         struct tg3 *tp = netdev_priv(dev);
7398   
7399         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7400         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7401         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7402 }
7403   
7404 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7405 {
7406         struct tg3 *tp = netdev_priv(dev);
7407         int irq_sync = 0;
7408   
7409         if (netif_running(dev)) {
7410                 tg3_netif_stop(tp);
7411                 irq_sync = 1;
7412         }
7413
7414         tg3_full_lock(tp, irq_sync);
7415
7416         if (epause->autoneg)
7417                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7418         else
7419                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7420         if (epause->rx_pause)
7421                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7422         else
7423                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7424         if (epause->tx_pause)
7425                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7426         else
7427                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7428
7429         if (netif_running(dev)) {
7430                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7431                 tg3_init_hw(tp);
7432                 tg3_netif_start(tp);
7433         }
7434
7435         tg3_full_unlock(tp);
7436   
7437         return 0;
7438 }
7439   
7440 static u32 tg3_get_rx_csum(struct net_device *dev)
7441 {
7442         struct tg3 *tp = netdev_priv(dev);
7443         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7444 }
7445   
7446 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7447 {
7448         struct tg3 *tp = netdev_priv(dev);
7449   
7450         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7451                 if (data != 0)
7452                         return -EINVAL;
7453                 return 0;
7454         }
7455   
7456         spin_lock_bh(&tp->lock);
7457         if (data)
7458                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7459         else
7460                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7461         spin_unlock_bh(&tp->lock);
7462   
7463         return 0;
7464 }
7465   
7466 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7467 {
7468         struct tg3 *tp = netdev_priv(dev);
7469   
7470         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7471                 if (data != 0)
7472                         return -EINVAL;
7473                 return 0;
7474         }
7475   
7476         if (data)
7477                 dev->features |= NETIF_F_IP_CSUM;
7478         else
7479                 dev->features &= ~NETIF_F_IP_CSUM;
7480
7481         return 0;
7482 }
7483
7484 static int tg3_get_stats_count (struct net_device *dev)
7485 {
7486         return TG3_NUM_STATS;
7487 }
7488
7489 static int tg3_get_test_count (struct net_device *dev)
7490 {
7491         return TG3_NUM_TEST;
7492 }
7493
7494 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7495 {
7496         switch (stringset) {
7497         case ETH_SS_STATS:
7498                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7499                 break;
7500         case ETH_SS_TEST:
7501                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7502                 break;
7503         default:
7504                 WARN_ON(1);     /* we need a WARN() */
7505                 break;
7506         }
7507 }
7508
7509 static void tg3_get_ethtool_stats (struct net_device *dev,
7510                                    struct ethtool_stats *estats, u64 *tmp_stats)
7511 {
7512         struct tg3 *tp = netdev_priv(dev);
7513         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7514 }
7515
7516 #define NVRAM_TEST_SIZE 0x100
7517
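/* Self-test: read the first 256 bytes of NVRAM and verify the magic value
 * and the two checksums (bootstrap at offset 0x10, manufacturing block at
 * offset 0xfc) computed with calc_crc().
 */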
7518 static int tg3_test_nvram(struct tg3 *tp)
7519 {
7520         u32 *buf, csum;
7521         int i, j, err = 0;
7522
7523         buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7524         if (buf == NULL)
7525                 return -ENOMEM;
7526
7527         for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7528                 u32 val;
7529
7530                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7531                         break;
7532                 buf[j] = cpu_to_le32(val);
7533         }
7534         if (i < NVRAM_TEST_SIZE)
7535                 goto out;
7536
7537         err = -EIO;
7538         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7539                 goto out;
7540
7541         /* Bootstrap checksum at offset 0x10 */
7542         csum = calc_crc((unsigned char *) buf, 0x10);
7543         if (csum != cpu_to_le32(buf[0x10/4]))
7544                 goto out;
7545
7546         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7547         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7548         if (csum != cpu_to_le32(buf[0xfc/4]))
7549                 goto out;
7550
7551         err = 0;
7552
7553 out:
7554         kfree(buf);
7555         return err;
7556 }
7557
7558 #define TG3_SERDES_TIMEOUT_SEC  2
7559 #define TG3_COPPER_TIMEOUT_SEC  6
7560
7561 static int tg3_test_link(struct tg3 *tp)
7562 {
7563         int i, max;
7564
7565         if (!netif_running(tp->dev))
7566                 return -ENODEV;
7567
7568         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7569                 max = TG3_SERDES_TIMEOUT_SEC;
7570         else
7571                 max = TG3_COPPER_TIMEOUT_SEC;
7572
7573         for (i = 0; i < max; i++) {
7574                 if (netif_carrier_ok(tp->dev))
7575                         return 0;
7576
7577                 if (msleep_interruptible(1000))
7578                         break;
7579         }
7580
7581         return -EIO;
7582 }
7583
7584 /* Only test the commonly used registers */
7585 static int tg3_test_registers(struct tg3 *tp)
7586 {
7587         int i, is_5705;
7588         u32 offset, read_mask, write_mask, val, save_val, read_val;
7589         static struct {
7590                 u16 offset;
7591                 u16 flags;
7592 #define TG3_FL_5705     0x1
7593 #define TG3_FL_NOT_5705 0x2
7594 #define TG3_FL_NOT_5788 0x4
7595                 u32 read_mask;
7596                 u32 write_mask;
7597         } reg_tbl[] = {
7598                 /* MAC Control Registers */
7599                 { MAC_MODE, TG3_FL_NOT_5705,
7600                         0x00000000, 0x00ef6f8c },
7601                 { MAC_MODE, TG3_FL_5705,
7602                         0x00000000, 0x01ef6b8c },
7603                 { MAC_STATUS, TG3_FL_NOT_5705,
7604                         0x03800107, 0x00000000 },
7605                 { MAC_STATUS, TG3_FL_5705,
7606                         0x03800100, 0x00000000 },
7607                 { MAC_ADDR_0_HIGH, 0x0000,
7608                         0x00000000, 0x0000ffff },
7609                 { MAC_ADDR_0_LOW, 0x0000,
7610                         0x00000000, 0xffffffff },
7611                 { MAC_RX_MTU_SIZE, 0x0000,
7612                         0x00000000, 0x0000ffff },
7613                 { MAC_TX_MODE, 0x0000,
7614                         0x00000000, 0x00000070 },
7615                 { MAC_TX_LENGTHS, 0x0000,
7616                         0x00000000, 0x00003fff },
7617                 { MAC_RX_MODE, TG3_FL_NOT_5705,
7618                         0x00000000, 0x000007fc },
7619                 { MAC_RX_MODE, TG3_FL_5705,
7620                         0x00000000, 0x000007dc },
7621                 { MAC_HASH_REG_0, 0x0000,
7622                         0x00000000, 0xffffffff },
7623                 { MAC_HASH_REG_1, 0x0000,
7624                         0x00000000, 0xffffffff },
7625                 { MAC_HASH_REG_2, 0x0000,
7626                         0x00000000, 0xffffffff },
7627                 { MAC_HASH_REG_3, 0x0000,
7628                         0x00000000, 0xffffffff },
7629
7630                 /* Receive Data and Receive BD Initiator Control Registers. */
7631                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7632                         0x00000000, 0xffffffff },
7633                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7634                         0x00000000, 0xffffffff },
7635                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7636                         0x00000000, 0x00000003 },
7637                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7638                         0x00000000, 0xffffffff },
7639                 { RCVDBDI_STD_BD+0, 0x0000,
7640                         0x00000000, 0xffffffff },
7641                 { RCVDBDI_STD_BD+4, 0x0000,
7642                         0x00000000, 0xffffffff },
7643                 { RCVDBDI_STD_BD+8, 0x0000,
7644                         0x00000000, 0xffff0002 },
7645                 { RCVDBDI_STD_BD+0xc, 0x0000,
7646                         0x00000000, 0xffffffff },
7647         
7648                 /* Receive BD Initiator Control Registers. */
7649                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7650                         0x00000000, 0xffffffff },
7651                 { RCVBDI_STD_THRESH, TG3_FL_5705,
7652                         0x00000000, 0x000003ff },
7653                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7654                         0x00000000, 0xffffffff },
7655         
7656                 /* Host Coalescing Control Registers. */
7657                 { HOSTCC_MODE, TG3_FL_NOT_5705,
7658                         0x00000000, 0x00000004 },
7659                 { HOSTCC_MODE, TG3_FL_5705,
7660                         0x00000000, 0x000000f6 },
7661                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7662                         0x00000000, 0xffffffff },
7663                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7664                         0x00000000, 0x000003ff },
7665                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7666                         0x00000000, 0xffffffff },
7667                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7668                         0x00000000, 0x000003ff },
7669                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7670                         0x00000000, 0xffffffff },
7671                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7672                         0x00000000, 0x000000ff },
7673                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7674                         0x00000000, 0xffffffff },
7675                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7676                         0x00000000, 0x000000ff },
7677                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7678                         0x00000000, 0xffffffff },
7679                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7680                         0x00000000, 0xffffffff },
7681                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7682                         0x00000000, 0xffffffff },
7683                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7684                         0x00000000, 0x000000ff },
7685                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7686                         0x00000000, 0xffffffff },
7687                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7688                         0x00000000, 0x000000ff },
7689                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7690                         0x00000000, 0xffffffff },
7691                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7692                         0x00000000, 0xffffffff },
7693                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7694                         0x00000000, 0xffffffff },
7695                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7696                         0x00000000, 0xffffffff },
7697                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7698                         0x00000000, 0xffffffff },
7699                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7700                         0xffffffff, 0x00000000 },
7701                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7702                         0xffffffff, 0x00000000 },
7703
7704                 /* Buffer Manager Control Registers. */
7705                 { BUFMGR_MB_POOL_ADDR, 0x0000,
7706                         0x00000000, 0x007fff80 },
7707                 { BUFMGR_MB_POOL_SIZE, 0x0000,
7708                         0x00000000, 0x007fffff },
7709                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7710                         0x00000000, 0x0000003f },
7711                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7712                         0x00000000, 0x000001ff },
7713                 { BUFMGR_MB_HIGH_WATER, 0x0000,
7714                         0x00000000, 0x000001ff },
7715                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7716                         0xffffffff, 0x00000000 },
7717                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7718                         0xffffffff, 0x00000000 },
7719         
7720                 /* Mailbox Registers */
7721                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7722                         0x00000000, 0x000001ff },
7723                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7724                         0x00000000, 0x000001ff },
7725                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7726                         0x00000000, 0x000007ff },
7727                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7728                         0x00000000, 0x000001ff },
7729
7730                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7731         };
7732
7733         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7734                 is_5705 = 1;
7735         else
7736                 is_5705 = 0;
7737
7738         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
7739                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7740                         continue;
7741
7742                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7743                         continue;
7744
7745                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7746                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
7747                         continue;
7748
7749                 offset = (u32) reg_tbl[i].offset;
7750                 read_mask = reg_tbl[i].read_mask;
7751                 write_mask = reg_tbl[i].write_mask;
7752
7753                 /* Save the original register content */
7754                 save_val = tr32(offset);
7755
7756                 /* Determine the read-only value. */
7757                 read_val = save_val & read_mask;
7758
7759                 /* Write zero to the register, then make sure the read-only bits
7760                  * are not changed and the read/write bits are all zeros.
7761                  */
7762                 tw32(offset, 0);
7763
7764                 val = tr32(offset);
7765
7766                 /* Test the read-only and read/write bits. */
7767                 if (((val & read_mask) != read_val) || (val & write_mask))
7768                         goto out;
7769
7770                 /* Write ones to all the bits defined by RdMask and WrMask, then
7771                  * make sure the read-only bits are not changed and the
7772                  * read/write bits are all ones.
7773                  */
7774                 tw32(offset, read_mask | write_mask);
7775
7776                 val = tr32(offset);
7777
7778                 /* Test the read-only bits. */
7779                 if ((val & read_mask) != read_val)
7780                         goto out;
7781
7782                 /* Test the read/write bits. */
7783                 if ((val & write_mask) != write_mask)
7784                         goto out;
7785
7786                 tw32(offset, save_val);
7787         }
7788
7789         return 0;
7790
7791 out:
7792         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
7793         tw32(offset, save_val);
7794         return -EIO;
7795 }
7796
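     /* Write each test pattern to every 32-bit word in the given window
      * of NIC-local memory and read it back, failing on the first
      * mismatch.
      */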
7797 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
7798 {
7799         static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7800         int i;
7801         u32 j;
7802
7803         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
7804                 for (j = 0; j < len; j += 4) {
7805                         u32 val;
7806
7807                         tg3_write_mem(tp, offset + j, test_pattern[i]);
7808                         tg3_read_mem(tp, offset + j, &val);
7809                         if (val != test_pattern[i])
7810                                 return -EIO;
7811                 }
7812         }
7813         return 0;
7814 }
7815
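     /* Run the pattern test over each internal memory region in the
      * per-chip table; 5705-class chips use a different layout than the
      * original 570x parts.
      */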
7816 static int tg3_test_memory(struct tg3 *tp)
7817 {
7818         static struct mem_entry {
7819                 u32 offset;
7820                 u32 len;
7821         } mem_tbl_570x[] = {
7822                 { 0x00000000, 0x01000},
7823                 { 0x00002000, 0x1c000},
7824                 { 0xffffffff, 0x00000}
7825         }, mem_tbl_5705[] = {
7826                 { 0x00000100, 0x0000c},
7827                 { 0x00000200, 0x00008},
7828                 { 0x00000b50, 0x00400},
7829                 { 0x00004000, 0x00800},
7830                 { 0x00006000, 0x01000},
7831                 { 0x00008000, 0x02000},
7832                 { 0x00010000, 0x0e000},
7833                 { 0xffffffff, 0x00000}
7834         };
7835         struct mem_entry *mem_tbl;
7836         int err = 0;
7837         int i;
7838
7839         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7840                 mem_tbl = mem_tbl_5705;
7841         else
7842                 mem_tbl = mem_tbl_570x;
7843
7844         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
7845                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
7846                     mem_tbl[i].len)) != 0)
7847                         break;
7848         }
7849
7850         return err;
7851 }
7852
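     /* MAC internal loopback test: reset the chip, switch the MAC into
      * GMII internal-loopback mode, transmit a single 1514-byte frame
      * and verify that it arrives on the standard receive ring with the
      * expected length and payload.
      */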
7853 static int tg3_test_loopback(struct tg3 *tp)
7854 {
7855         u32 mac_mode, send_idx, rx_start_idx, rx_idx, tx_idx, opaque_key;
7856         u32 desc_idx;
7857         struct sk_buff *skb, *rx_skb;
7858         u8 *tx_data;
7859         dma_addr_t map;
7860         int num_pkts, tx_len, rx_len, i, err;
7861         struct tg3_rx_buffer_desc *desc;
7862
7863         if (!netif_running(tp->dev))
7864                 return -ENODEV;
7865
7866         err = -EIO;
7867
7868         tg3_abort_hw(tp, 1);
7869
7870         tg3_reset_hw(tp);
7871
7872         mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7873                    MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7874                    MAC_MODE_PORT_MODE_GMII;
7875         tw32(MAC_MODE, mac_mode);
7876
7877         tx_len = 1514;
7878         skb = dev_alloc_skb(tx_len);
             if (!skb)
                     return -ENOMEM;
7879         tx_data = skb_put(skb, tx_len);
7880         memcpy(tx_data, tp->dev->dev_addr, 6);
7881         memset(tx_data + 6, 0x0, 8);
7882
7883         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
7884
7885         for (i = 14; i < tx_len; i++)
7886                 tx_data[i] = (u8) (i & 0xff);
7887
7888         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
7889
7890         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7891              HOSTCC_MODE_NOW);
7892
7893         udelay(10);
7894
7895         rx_start_idx = tp->hw_status->idx[0].rx_producer;
7896
7897         send_idx = 0;
7898         num_pkts = 0;
7899
7900         tg3_set_txd(tp, send_idx, map, tx_len, 0, 1);
7901
7902         send_idx++;
7903         num_pkts++;
7904
7905         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx);
7906         tr32(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
7907
7908         udelay(10);
7909
7910         for (i = 0; i < 10; i++) {
7911                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7912                        HOSTCC_MODE_NOW);
7913
7914                 udelay(10);
7915
7916                 tx_idx = tp->hw_status->idx[0].tx_consumer;
7917                 rx_idx = tp->hw_status->idx[0].rx_producer;
7918                 if ((tx_idx == send_idx) &&
7919                     (rx_idx == (rx_start_idx + num_pkts)))
7920                         break;
7921         }
7922
7923         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
7924         dev_kfree_skb(skb);
7925
7926         if (tx_idx != send_idx)
7927                 goto out;
7928
7929         if (rx_idx != rx_start_idx + num_pkts)
7930                 goto out;
7931
7932         desc = &tp->rx_rcb[rx_start_idx];
7933         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
7934         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
7935         if (opaque_key != RXD_OPAQUE_RING_STD)
7936                 goto out;
7937
7938         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
7939             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
7940                 goto out;
7941
7942         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
7943         if (rx_len != tx_len)
7944                 goto out;
7945
7946         rx_skb = tp->rx_std_buffers[desc_idx].skb;
7947
7948         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
7949         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
7950
7951         for (i = 14; i < tx_len; i++) {
7952                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
7953                         goto out;
7954         }
7955         err = 0;
7956
7957         /* tg3_free_rings will unmap and free the rx_skb */
7958 out:
7959         return err;
7960 }
7961
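     /* ethtool self-test entry point.  The NVRAM and link tests always
      * run; the offline tests (registers, memory, loopback, interrupt)
      * halt the chip first and re-initialize it afterwards if the
      * interface was up.
      */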
7962 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
7963                           u64 *data)
7964 {
7965         struct tg3 *tp = netdev_priv(dev);
7966
7967         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
7968
7969         if (tg3_test_nvram(tp) != 0) {
7970                 etest->flags |= ETH_TEST_FL_FAILED;
7971                 data[0] = 1;
7972         }
7973         if (tg3_test_link(tp) != 0) {
7974                 etest->flags |= ETH_TEST_FL_FAILED;
7975                 data[1] = 1;
7976         }
7977         if (etest->flags & ETH_TEST_FL_OFFLINE) {
7978                 int irq_sync = 0;
7979
7980                 if (netif_running(dev)) {
7981                         tg3_netif_stop(tp);
7982                         irq_sync = 1;
7983                 }
7984
7985                 tg3_full_lock(tp, irq_sync);
7986
7987                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
7988                 tg3_nvram_lock(tp);
7989                 tg3_halt_cpu(tp, RX_CPU_BASE);
7990                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7991                         tg3_halt_cpu(tp, TX_CPU_BASE);
7992                 tg3_nvram_unlock(tp);
7993
7994                 if (tg3_test_registers(tp) != 0) {
7995                         etest->flags |= ETH_TEST_FL_FAILED;
7996                         data[2] = 1;
7997                 }
7998                 if (tg3_test_memory(tp) != 0) {
7999                         etest->flags |= ETH_TEST_FL_FAILED;
8000                         data[3] = 1;
8001                 }
8002                 if (tg3_test_loopback(tp) != 0) {
8003                         etest->flags |= ETH_TEST_FL_FAILED;
8004                         data[4] = 1;
8005                 }
8006
8007                 tg3_full_unlock(tp);
8008
8009                 if (tg3_test_interrupt(tp) != 0) {
8010                         etest->flags |= ETH_TEST_FL_FAILED;
8011                         data[5] = 1;
8012                 }
8013
8014                 tg3_full_lock(tp, 0);
8015
8016                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8017                 if (netif_running(dev)) {
8018                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8019                         tg3_init_hw(tp);
8020                         tg3_netif_start(tp);
8021                 }
8022
8023                 tg3_full_unlock(tp);
8024         }
8025 }
8026
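     /* MII ioctl handler: SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG are
      * serviced through tg3_readphy()/tg3_writephy() under tp->lock.
      * SerDes configurations have no MII PHY, so those requests fall
      * through to -EOPNOTSUPP.
      */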
8027 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8028 {
8029         struct mii_ioctl_data *data = if_mii(ifr);
8030         struct tg3 *tp = netdev_priv(dev);
8031         int err;
8032
8033         switch(cmd) {
8034         case SIOCGMIIPHY:
8035                 data->phy_id = PHY_ADDR;
8036
8037                 /* fallthru */
8038         case SIOCGMIIREG: {
8039                 u32 mii_regval;
8040
8041                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8042                         break;                  /* We have no PHY */
8043
8044                 spin_lock_bh(&tp->lock);
8045                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8046                 spin_unlock_bh(&tp->lock);
8047
8048                 data->val_out = mii_regval;
8049
8050                 return err;
8051         }
8052
8053         case SIOCSMIIREG:
8054                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8055                         break;                  /* We have no PHY */
8056
8057                 if (!capable(CAP_NET_ADMIN))
8058                         return -EPERM;
8059
8060                 spin_lock_bh(&tp->lock);
8061                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8062                 spin_unlock_bh(&tp->lock);
8063
8064                 return err;
8065
8066         default:
8067                 /* do nothing */
8068                 break;
8069         }
8070         return -EOPNOTSUPP;
8071 }
8072
8073 #if TG3_VLAN_TAG_USED
8074 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8075 {
8076         struct tg3 *tp = netdev_priv(dev);
8077
8078         tg3_full_lock(tp, 0);
8079
8080         tp->vlgrp = grp;
8081
8082         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8083         __tg3_set_rx_mode(dev);
8084
8085         tg3_full_unlock(tp);
8086 }
8087
8088 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8089 {
8090         struct tg3 *tp = netdev_priv(dev);
8091
8092         tg3_full_lock(tp, 0);
8093         if (tp->vlgrp)
8094                 tp->vlgrp->vlan_devices[vid] = NULL;
8095         tg3_full_unlock(tp);
8096 }
8097 #endif
8098
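     /* ethtool coalescing hooks: reads return the values cached in
      * tp->coal; writes are validated against the per-chip limits,
      * cached, and programmed into the hardware if the device is up.
      */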
8099 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8100 {
8101         struct tg3 *tp = netdev_priv(dev);
8102
8103         memcpy(ec, &tp->coal, sizeof(*ec));
8104         return 0;
8105 }
8106
8107 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8108 {
8109         struct tg3 *tp = netdev_priv(dev);
8110         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8111         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8112
8113         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8114                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8115                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8116                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8117                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8118         }
8119
8120         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8121             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8122             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8123             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8124             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8125             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8126             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8127             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8128             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8129             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8130                 return -EINVAL;
8131
8132         /* No rx interrupts will be generated if both are zero */
8133         if ((ec->rx_coalesce_usecs == 0) &&
8134             (ec->rx_max_coalesced_frames == 0))
8135                 return -EINVAL;
8136
8137         /* No tx interrupts will be generated if both are zero */
8138         if ((ec->tx_coalesce_usecs == 0) &&
8139             (ec->tx_max_coalesced_frames == 0))
8140                 return -EINVAL;
8141
8142         /* Only copy relevant parameters, ignore all others. */
8143         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8144         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8145         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8146         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8147         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8148         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8149         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8150         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8151         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8152
8153         if (netif_running(dev)) {
8154                 tg3_full_lock(tp, 0);
8155                 __tg3_set_coalesce(tp, &tp->coal);
8156                 tg3_full_unlock(tp);
8157         }
8158         return 0;
8159 }
8160
8161 static struct ethtool_ops tg3_ethtool_ops = {
8162         .get_settings           = tg3_get_settings,
8163         .set_settings           = tg3_set_settings,
8164         .get_drvinfo            = tg3_get_drvinfo,
8165         .get_regs_len           = tg3_get_regs_len,
8166         .get_regs               = tg3_get_regs,
8167         .get_wol                = tg3_get_wol,
8168         .set_wol                = tg3_set_wol,
8169         .get_msglevel           = tg3_get_msglevel,
8170         .set_msglevel           = tg3_set_msglevel,
8171         .nway_reset             = tg3_nway_reset,
8172         .get_link               = ethtool_op_get_link,
8173         .get_eeprom_len         = tg3_get_eeprom_len,
8174         .get_eeprom             = tg3_get_eeprom,
8175         .set_eeprom             = tg3_set_eeprom,
8176         .get_ringparam          = tg3_get_ringparam,
8177         .set_ringparam          = tg3_set_ringparam,
8178         .get_pauseparam         = tg3_get_pauseparam,
8179         .set_pauseparam         = tg3_set_pauseparam,
8180         .get_rx_csum            = tg3_get_rx_csum,
8181         .set_rx_csum            = tg3_set_rx_csum,
8182         .get_tx_csum            = ethtool_op_get_tx_csum,
8183         .set_tx_csum            = tg3_set_tx_csum,
8184         .get_sg                 = ethtool_op_get_sg,
8185         .set_sg                 = ethtool_op_set_sg,
8186 #if TG3_TSO_SUPPORT != 0
8187         .get_tso                = ethtool_op_get_tso,
8188         .set_tso                = tg3_set_tso,
8189 #endif
8190         .self_test_count        = tg3_get_test_count,
8191         .self_test              = tg3_self_test,
8192         .get_strings            = tg3_get_strings,
8193         .get_stats_count        = tg3_get_stats_count,
8194         .get_ethtool_stats      = tg3_get_ethtool_stats,
8195         .get_coalesce           = tg3_get_coalesce,
8196         .set_coalesce           = tg3_set_coalesce,
8197 };
8198
8199 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8200 {
8201         u32 cursize, val;
8202
8203         tp->nvram_size = EEPROM_CHIP_SIZE;
8204
8205         if (tg3_nvram_read(tp, 0, &val) != 0)
8206                 return;
8207
8208         if (swab32(val) != TG3_EEPROM_MAGIC)
8209                 return;
8210
8211         /*
8212          * Size the chip by reading offsets at increasing powers of two.
8213          * When we encounter our validation signature, we know the addressing
8214          * has wrapped around, and thus have our chip size.
8215          */
8216         cursize = 0x800;
8217
8218         while (cursize < tp->nvram_size) {
8219                 if (tg3_nvram_read(tp, cursize, &val) != 0)
8220                         return;
8221
8222                 if (swab32(val) == TG3_EEPROM_MAGIC)
8223                         break;
8224
8225                 cursize <<= 1;
8226         }
8227
8228         tp->nvram_size = cursize;
8229 }
8230
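     /* NVRAM parts report their size (in KB) in the upper 16 bits of
      * the word at offset 0xf0; fall back to 128KB if that word is zero
      * or unreadable.
      */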
8231 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8232 {
8233         u32 val;
8234
8235         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8236                 if (val != 0) {
8237                         tp->nvram_size = (val >> 16) * 1024;
8238                         return;
8239                 }
8240         }
8241         tp->nvram_size = 0x20000;
8242 }
8243
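     /* Decode NVRAM_CFG1: note whether a flash interface is present,
      * and on the 5750 map the vendor field to a JEDEC ID, page size
      * and buffered/unbuffered mode.  Other chips reaching this point
      * default to a buffered Atmel AT45DB0X1B part.
      */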
8244 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8245 {
8246         u32 nvcfg1;
8247
8248         nvcfg1 = tr32(NVRAM_CFG1);
8249         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8250                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8251         }
8252         else {
8253                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8254                 tw32(NVRAM_CFG1, nvcfg1);
8255         }
8256
8257         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8258                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8259                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8260                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8261                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8262                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8263                                 break;
8264                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8265                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8266                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8267                                 break;
8268                         case FLASH_VENDOR_ATMEL_EEPROM:
8269                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8270                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8271                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8272                                 break;
8273                         case FLASH_VENDOR_ST:
8274                                 tp->nvram_jedecnum = JEDEC_ST;
8275                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8276                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8277                                 break;
8278                         case FLASH_VENDOR_SAIFUN:
8279                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8280                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8281                                 break;
8282                         case FLASH_VENDOR_SST_SMALL:
8283                         case FLASH_VENDOR_SST_LARGE:
8284                                 tp->nvram_jedecnum = JEDEC_SST;
8285                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8286                                 break;
8287                 }
8288         }
8289         else {
8290                 tp->nvram_jedecnum = JEDEC_ATMEL;
8291                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8292                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8293         }
8294 }
8295
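     /* 5752-specific NVRAM decode: bit 27 of NVRAM_CFG1 flags
      * TPM-protected NVRAM, the vendor field selects Atmel or ST parts,
      * and flash parts carry an explicit page-size field.
      */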
8296 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8297 {
8298         u32 nvcfg1;
8299
8300         nvcfg1 = tr32(NVRAM_CFG1);
8301
8302         /* NVRAM protection for TPM */
8303         if (nvcfg1 & (1 << 27))
8304                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8305
8306         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8307                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8308                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8309                         tp->nvram_jedecnum = JEDEC_ATMEL;
8310                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8311                         break;
8312                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8313                         tp->nvram_jedecnum = JEDEC_ATMEL;
8314                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8315                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8316                         break;
8317                 case FLASH_5752VENDOR_ST_M45PE10:
8318                 case FLASH_5752VENDOR_ST_M45PE20:
8319                 case FLASH_5752VENDOR_ST_M45PE40:
8320                         tp->nvram_jedecnum = JEDEC_ST;
8321                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8322                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8323                         break;
8324         }
8325
8326         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8327                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8328                         case FLASH_5752PAGE_SIZE_256:
8329                                 tp->nvram_pagesize = 256;
8330                                 break;
8331                         case FLASH_5752PAGE_SIZE_512:
8332                                 tp->nvram_pagesize = 512;
8333                                 break;
8334                         case FLASH_5752PAGE_SIZE_1K:
8335                                 tp->nvram_pagesize = 1024;
8336                                 break;
8337                         case FLASH_5752PAGE_SIZE_2K:
8338                                 tp->nvram_pagesize = 2048;
8339                                 break;
8340                         case FLASH_5752PAGE_SIZE_4K:
8341                                 tp->nvram_pagesize = 4096;
8342                                 break;
8343                         case FLASH_5752PAGE_SIZE_264:
8344                                 tp->nvram_pagesize = 264;
8345                                 break;
8346                 }
8347         }
8348         else {
8349                 /* For eeprom, set pagesize to maximum eeprom size */
8350                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8351
8352                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8353                 tw32(NVRAM_CFG1, nvcfg1);
8354         }
8355 }
8356
8357 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
8358 static void __devinit tg3_nvram_init(struct tg3 *tp)
8359 {
8360         int j;
8361
8362         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8363                 return;
8364
8365         tw32_f(GRC_EEPROM_ADDR,
8366              (EEPROM_ADDR_FSM_RESET |
8367               (EEPROM_DEFAULT_CLOCK_PERIOD <<
8368                EEPROM_ADDR_CLKPERD_SHIFT)));
8369
8370         /* XXX schedule_timeout() ... */
8371         for (j = 0; j < 100; j++)
8372                 udelay(10);
8373
8374         /* Enable seeprom accesses. */
8375         tw32_f(GRC_LOCAL_CTRL,
8376              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8377         udelay(100);
8378
8379         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8380             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8381                 tp->tg3_flags |= TG3_FLAG_NVRAM;
8382
8383                 tg3_enable_nvram_access(tp);
8384
8385                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8386                         tg3_get_5752_nvram_info(tp);
8387                 else
8388                         tg3_get_nvram_info(tp);
8389
8390                 tg3_get_nvram_size(tp);
8391
8392                 tg3_disable_nvram_access(tp);
8393
8394         } else {
8395                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
8396
8397                 tg3_get_eeprom_size(tp);
8398         }
8399 }
8400
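     /* Read one 32-bit word from a legacy serial EEPROM through the
      * GRC_EEPROM_ADDR/GRC_EEPROM_DATA registers, polling for
      * completion.
      */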
8401 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
8402                                         u32 offset, u32 *val)
8403 {
8404         u32 tmp;
8405         int i;
8406
8407         if (offset > EEPROM_ADDR_ADDR_MASK ||
8408             (offset % 4) != 0)
8409                 return -EINVAL;
8410
8411         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
8412                                         EEPROM_ADDR_DEVID_MASK |
8413                                         EEPROM_ADDR_READ);
8414         tw32(GRC_EEPROM_ADDR,
8415              tmp |
8416              (0 << EEPROM_ADDR_DEVID_SHIFT) |
8417              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
8418               EEPROM_ADDR_ADDR_MASK) |
8419              EEPROM_ADDR_READ | EEPROM_ADDR_START);
8420
8421         for (i = 0; i < 10000; i++) {
8422                 tmp = tr32(GRC_EEPROM_ADDR);
8423
8424                 if (tmp & EEPROM_ADDR_COMPLETE)
8425                         break;
8426                 udelay(100);
8427         }
8428         if (!(tmp & EEPROM_ADDR_COMPLETE))
8429                 return -EBUSY;
8430
8431         *val = tr32(GRC_EEPROM_DATA);
8432         return 0;
8433 }
8434
8435 #define NVRAM_CMD_TIMEOUT 10000
8436
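     /* Issue an NVRAM command and poll for NVRAM_CMD_DONE, giving up
      * after NVRAM_CMD_TIMEOUT polls (roughly 100ms).
      */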
8437 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
8438 {
8439         int i;
8440
8441         tw32(NVRAM_CMD, nvram_cmd);
8442         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
8443                 udelay(10);
8444                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
8445                         udelay(10);
8446                         break;
8447                 }
8448         }
8449         if (i == NVRAM_CMD_TIMEOUT) {
8450                 return -EBUSY;
8451         }
8452         return 0;
8453 }
8454
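     /* Read one 32-bit word of NVRAM.  Legacy EEPROM-only parts go
      * through the GRC EEPROM interface; otherwise the offset (first
      * translated to the page/byte split used by buffered Atmel flash,
      * if applicable) is read through the NVRAM command interface
      * under the NVRAM lock.
      */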
8455 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
8456 {
8457         int ret;
8458
8459         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8460                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
8461                 return -EINVAL;
8462         }
8463
8464         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
8465                 return tg3_nvram_read_using_eeprom(tp, offset, val);
8466
8467         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
8468                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8469                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8470
8471                 offset = ((offset / tp->nvram_pagesize) <<
8472                           ATMEL_AT45DB0X1B_PAGE_POS) +
8473                         (offset % tp->nvram_pagesize);
8474         }
8475
8476         if (offset > NVRAM_ADDR_MSK)
8477                 return -EINVAL;
8478
8479         tg3_nvram_lock(tp);
8480
8481         tg3_enable_nvram_access(tp);
8482
8483         tw32(NVRAM_ADDR, offset);
8484         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
8485                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
8486
8487         if (ret == 0)
8488                 *val = swab32(tr32(NVRAM_RDDATA));
8489
8490         tg3_nvram_unlock(tp);
8491
8492         tg3_disable_nvram_access(tp);
8493
8494         return ret;
8495 }
8496
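     /* Write a buffer to a legacy serial EEPROM one 32-bit word at a
      * time, polling GRC_EEPROM_ADDR for completion after each word.
      */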
8497 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
8498                                     u32 offset, u32 len, u8 *buf)
8499 {
8500         int i, j, rc = 0;
8501         u32 val;
8502
8503         for (i = 0; i < len; i += 4) {
8504                 u32 addr, data;
8505
8506                 addr = offset + i;
8507
8508                 memcpy(&data, buf + i, 4);
8509
8510                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
8511
8512                 val = tr32(GRC_EEPROM_ADDR);
8513                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
8514
8515                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
8516                         EEPROM_ADDR_READ);
8517                 tw32(GRC_EEPROM_ADDR, val |
8518                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
8519                         (addr & EEPROM_ADDR_ADDR_MASK) |
8520                         EEPROM_ADDR_START |
8521                         EEPROM_ADDR_WRITE);
8522
8523                 for (j = 0; j < 10000; j++) {
8524                         val = tr32(GRC_EEPROM_ADDR);
8525
8526                         if (val & EEPROM_ADDR_COMPLETE)
8527                                 break;
8528                         udelay(100);
8529                 }
8530                 if (!(val & EEPROM_ADDR_COMPLETE)) {
8531                         rc = -EBUSY;
8532                         break;
8533                 }
8534         }
8535
8536         return rc;
8537 }
8538
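     /* Unbuffered flash needs a read-modify-write of each whole page:
      * read the page into a bounce buffer, merge in the new data, issue
      * a write-enable plus page erase, then program the page back one
      * word at a time.
      */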
8539 /* offset and length are dword aligned */
8540 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
8541                 u8 *buf)
8542 {
8543         int ret = 0;
8544         u32 pagesize = tp->nvram_pagesize;
8545         u32 pagemask = pagesize - 1;
8546         u32 nvram_cmd;
8547         u8 *tmp;
8548
8549         tmp = kmalloc(pagesize, GFP_KERNEL);
8550         if (tmp == NULL)
8551                 return -ENOMEM;
8552
8553         while (len) {
8554                 int j;
8555                 u32 phy_addr, page_off, size;
8556
8557                 phy_addr = offset & ~pagemask;
8558
8559                 for (j = 0; j < pagesize; j += 4) {
8560                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
8561                                                 (u32 *) (tmp + j))))
8562                                 break;
8563                 }
8564                 if (ret)
8565                         break;
8566
8567                 page_off = offset & pagemask;
8568                 size = pagesize;
8569                 if (len < size)
8570                         size = len;
8571
8572                 len -= size;
8573
8574                 memcpy(tmp + page_off, buf, size);
8575
8576                 offset = offset + (pagesize - page_off);
8577
8578                 tg3_enable_nvram_access(tp);
8579
8580                 /*
8581                  * Before we can erase the flash page, we need
8582                  * to issue a special "write enable" command.
8583                  */
8584                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8585
8586                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8587                         break;
8588
8589                 /* Erase the target page */
8590                 tw32(NVRAM_ADDR, phy_addr);
8591
8592                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
8593                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
8594
8595                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8596                         break;
8597
8598                 /* Issue another write enable to start the write. */
8599                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8600
8601                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
8602                         break;
8603
8604                 for (j = 0; j < pagesize; j += 4) {
8605                         u32 data;
8606
8607                         data = *((u32 *) (tmp + j));
8608                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
8609
8610                         tw32(NVRAM_ADDR, phy_addr + j);
8611
8612                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
8613                                 NVRAM_CMD_WR;
8614
8615                         if (j == 0)
8616                                 nvram_cmd |= NVRAM_CMD_FIRST;
8617                         else if (j == (pagesize - 4))
8618                                 nvram_cmd |= NVRAM_CMD_LAST;
8619
8620                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8621                                 break;
8622                 }
8623                 if (ret)
8624                         break;
8625         }
8626
8627         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
8628         tg3_nvram_exec_cmd(tp, nvram_cmd);
8629
8630         kfree(tmp);
8631
8632         return ret;
8633 }
8634
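     /* Buffered flash and EEPROM parts take writes without an explicit
      * erase: stream the words out, marking page boundaries with
      * NVRAM_CMD_FIRST/LAST and issuing a write-enable first on ST
      * parts.
      */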
8635 /* offset and length are dword aligned */
8636 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
8637                 u8 *buf)
8638 {
8639         int i, ret = 0;
8640
8641         for (i = 0; i < len; i += 4, offset += 4) {
8642                 u32 data, page_off, phy_addr, nvram_cmd;
8643
8644                 memcpy(&data, buf + i, 4);
8645                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
8646
8647                 page_off = offset % tp->nvram_pagesize;
8648
8649                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
8650                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
8651
8652                         phy_addr = ((offset / tp->nvram_pagesize) <<
8653                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
8654                 }
8655                 else {
8656                         phy_addr = offset;
8657                 }
8658
8659                 tw32(NVRAM_ADDR, phy_addr);
8660
8661                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
8662
8663                 if ((page_off == 0) || (i == 0))
8664                         nvram_cmd |= NVRAM_CMD_FIRST;
8665                 else if (page_off == (tp->nvram_pagesize - 4))
8666                         nvram_cmd |= NVRAM_CMD_LAST;
8667
8668                 if (i == (len - 4))
8669                         nvram_cmd |= NVRAM_CMD_LAST;
8670
8671                 if ((tp->nvram_jedecnum == JEDEC_ST) &&
8672                         (nvram_cmd & NVRAM_CMD_FIRST)) {
8673
8674                         if ((ret = tg3_nvram_exec_cmd(tp,
8675                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
8676                                 NVRAM_CMD_DONE)))
8677
8678                                 break;
8679                 }
8680                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8681                         /* We always do complete word writes to eeprom. */
8682                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
8683                 }
8684
8685                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
8686                         break;
8687         }
8688         return ret;
8689 }
8690
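     /* Top-level NVRAM write: drop the EEPROM write-protect GPIO if
      * necessary, enable NVRAM writes in GRC_MODE, dispatch to the
      * buffered or unbuffered path, then restore the previous state.
      */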
8691 /* offset and length are dword aligned */
8692 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
8693 {
8694         int ret;
8695
8696         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
8697                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
8698                 return -EINVAL;
8699         }
8700
8701         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8702                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
8703                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
8704                 udelay(40);
8705         }
8706
8707         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
8708                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
8709         }
8710         else {
8711                 u32 grc_mode;
8712
8713                 tg3_nvram_lock(tp);
8714
8715                 tg3_enable_nvram_access(tp);
8716                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
8717                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
8718                         tw32(NVRAM_WRITE1, 0x406);
8719
8720                 grc_mode = tr32(GRC_MODE);
8721                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
8722
8723                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
8724                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
8725
8726                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
8727                                 buf);
8728                 }
8729                 else {
8730                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
8731                                 buf);
8732                 }
8733
8734                 grc_mode = tr32(GRC_MODE);
8735                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
8736
8737                 tg3_disable_nvram_access(tp);
8738                 tg3_nvram_unlock(tp);
8739         }
8740
8741         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
8742                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8743                 udelay(40);
8744         }
8745
8746         return ret;
8747 }
8748
8749 struct subsys_tbl_ent {
8750         u16 subsys_vendor, subsys_devid;
8751         u32 phy_id;
8752 };
8753
8754 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
8755         /* Broadcom boards. */
8756         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
8757         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
8758         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
8759         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
8760         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
8761         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
8762         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
8763         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
8764         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
8765         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
8766         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
8767
8768         /* 3com boards. */
8769         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
8770         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
8771         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
8772         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
8773         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
8774
8775         /* DELL boards. */
8776         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
8777         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
8778         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
8779         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
8780
8781         /* Compaq boards. */
8782         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
8783         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
8784         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
8785         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
8786         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
8787
8788         /* IBM boards. */
8789         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
8790 };
8791
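     /* Match the board's PCI subsystem vendor/device IDs against the
      * table above to recover a PHY ID when NVRAM doesn't provide one.
      */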
8792 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
8793 {
8794         int i;
8795
8796         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
8797                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
8798                      tp->pdev->subsystem_vendor) &&
8799                     (subsys_id_to_phy_id[i].subsys_devid ==
8800                      tp->pdev->subsystem_device))
8801                         return &subsys_id_to_phy_id[i];
8802         }
8803         return NULL;
8804 }
8805
8806 /* Since this function may be called in D3-hot power state during
8807  * tg3_init_one(), only config cycles are allowed.
8808  */
8809 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
8810 {
8811         u32 val;
8812
8813         /* Make sure register accesses (indirect or otherwise)
8814          * will function correctly.
8815          */
8816         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8817                                tp->misc_host_ctrl);
8818
8819         tp->phy_id = PHY_ID_INVALID;
8820         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8821
8822         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8823         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8824                 u32 nic_cfg, led_cfg;
8825                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
8826                 int eeprom_phy_serdes = 0;
8827
8828                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8829                 tp->nic_sram_data_cfg = nic_cfg;
8830
8831                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
8832                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
8833                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8834                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8835                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
8836                     (ver > 0) && (ver < 0x100))
8837                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
8838
8839                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
8840                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
8841                         eeprom_phy_serdes = 1;
8842
8843                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
8844                 if (nic_phy_id != 0) {
8845                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
8846                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
8847
8848                         eeprom_phy_id  = (id1 >> 16) << 10;
8849                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
8850                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
8851                 } else
8852                         eeprom_phy_id = 0;
8853
8854                 tp->phy_id = eeprom_phy_id;
8855                 if (eeprom_phy_serdes) {
8856                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8857                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
8858                         else
8859                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8860                 }
8861
8862                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8863                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
8864                                     SHASTA_EXT_LED_MODE_MASK);
8865                 else
8866                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
8867
8868                 switch (led_cfg) {
8869                 default:
8870                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
8871                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8872                         break;
8873
8874                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
8875                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8876                         break;
8877
8878                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
8879                         tp->led_ctrl = LED_CTRL_MODE_MAC;
8880
8881                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
8882                          * read on some older 5700/5701 bootcode.
8883                          */
8884                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
8885                             ASIC_REV_5700 ||
8886                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
8887                             ASIC_REV_5701)
8888                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
8889
8890                         break;
8891
8892                 case SHASTA_EXT_LED_SHARED:
8893                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
8894                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8895                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
8896                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
8897                                                  LED_CTRL_MODE_PHY_2);
8898                         break;
8899
8900                 case SHASTA_EXT_LED_MAC:
8901                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
8902                         break;
8903
8904                 case SHASTA_EXT_LED_COMBO:
8905                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
8906                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
8907                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
8908                                                  LED_CTRL_MODE_PHY_2);
8909                         break;
8910
8911                 }
8912
8913                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8914                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
8915                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
8916                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
8917
8918                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
8919                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
8920                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
8921                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
8922
8923                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8924                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
8925                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8926                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
8927                 }
8928                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
8929                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
8930
8931                 if (cfg2 & (1 << 17))
8932                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
8933
8934                 /* SerDes signal pre-emphasis in register 0x590 is set
8935                  * by the bootcode if bit 18 is set. */
8936                 if (cfg2 & (1 << 18))
8937                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
8938         }
8939 }
8940
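     /* Probe the PHY: read its ID over MII (unless ASF firmware owns
      * the PHY), fall back to the EEPROM value or the subsystem-ID
      * table if the ID looks bogus, and for copper links that are not
      * already up, reset the PHY and advertise the full set of
      * supported modes.
      */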
8941 static int __devinit tg3_phy_probe(struct tg3 *tp)
8942 {
8943         u32 hw_phy_id_1, hw_phy_id_2;
8944         u32 hw_phy_id, hw_phy_id_masked;
8945         int err;
8946
8947         /* Reading the PHY ID register can conflict with ASF
8948          * firmware access to the PHY hardware.
8949          */
8950         err = 0;
8951         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
8952                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
8953         } else {
8954                 /* Now read the physical PHY_ID from the chip and verify
8955                  * that it is sane.  If it doesn't look good, fall back
8956                  * to the PHY_ID found in the eeprom area and, failing
8957                  * that, to the hard-coded subsystem-ID table.
8958                  */
8959                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
8960                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
8961
8962                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
8963                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
8964                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
8965
8966                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
8967         }
8968
8969         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
8970                 tp->phy_id = hw_phy_id;
8971                 if (hw_phy_id_masked == PHY_ID_BCM8002)
8972                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8973         } else {
8974                 if (tp->phy_id != PHY_ID_INVALID) {
8975                         /* Do nothing, phy ID already set up in
8976                          * tg3_get_eeprom_hw_cfg().
8977                          */
8978                 } else {
8979                         struct subsys_tbl_ent *p;
8980
8981                         /* No eeprom signature?  Try the hardcoded
8982                          * subsys device table.
8983                          */
8984                         p = lookup_by_subsys(tp);
8985                         if (!p)
8986                                 return -ENODEV;
8987
8988                         tp->phy_id = p->phy_id;
8989                         if (!tp->phy_id ||
8990                             tp->phy_id == PHY_ID_BCM8002)
8991                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
8992                 }
8993         }
8994
8995         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
8996             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
8997                 u32 bmsr, adv_reg, tg3_ctrl;
8998
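                     /* The BMSR link-status bit is latched; read the
                      * register twice so the second read reflects the
                      * current link state.
                      */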
8999                 tg3_readphy(tp, MII_BMSR, &bmsr);
9000                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9001                     (bmsr & BMSR_LSTATUS))
9002                         goto skip_phy_reset;
9003
9004                 err = tg3_phy_reset(tp);
9005                 if (err)
9006                         return err;
9007
9008                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9009                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9010                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9011                 tg3_ctrl = 0;
9012                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9013                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9014                                     MII_TG3_CTRL_ADV_1000_FULL);
9015                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9016                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9017                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9018                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9019                 }
9020
9021                 if (!tg3_copper_is_advertising_all(tp)) {
9022                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9023
9024                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9025                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9026
9027                         tg3_writephy(tp, MII_BMCR,
9028                                      BMCR_ANENABLE | BMCR_ANRESTART);
9029                 }
9030                 tg3_phy_set_wirespeed(tp);
9031
9032                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9033                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9034                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9035         }
9036
9037 skip_phy_reset:
9038         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9039                 err = tg3_init_5401phy_dsp(tp);
9040                 if (err)
9041                         return err;
9042         }
9043
9044         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9045                 err = tg3_init_5401phy_dsp(tp);
9046         }
9047
9048         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9049                 tp->link_config.advertising =
9050                         (ADVERTISED_1000baseT_Half |
9051                          ADVERTISED_1000baseT_Full |
9052                          ADVERTISED_Autoneg |
9053                          ADVERTISED_FIBRE);
9054         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9055                 tp->link_config.advertising &=
9056                         ~(ADVERTISED_1000baseT_Half |
9057                           ADVERTISED_1000baseT_Full);
9058
9059         return err;
9060 }
9061
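     /* Pull the board part number out of the VPD data stored in NVRAM
      * at offset 0x100 by scanning the read-only section for the "PN"
      * keyword; falls back to "none".
      */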
9062 static void __devinit tg3_read_partno(struct tg3 *tp)
9063 {
9064         unsigned char vpd_data[256];
9065         int i;
9066
9067         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9068                 /* Sun decided not to put the necessary bits in the
9069                  * NVRAM of their onboard tg3 parts :(
9070                  */
9071                 strcpy(tp->board_part_number, "Sun 570X");
9072                 return;
9073         }
9074
9075         for (i = 0; i < 256; i += 4) {
9076                 u32 tmp;
9077
9078                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9079                         goto out_not_found;
9080
9081                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9082                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9083                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9084                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9085         }
9086
9087         /* Now parse and find the part number. */
9088         for (i = 0; i < 256; ) {
9089                 unsigned char val = vpd_data[i];
9090                 int block_end;
9091
9092                 if (val == 0x82 || val == 0x91) {
9093                         i = (i + 3 +
9094                              (vpd_data[i + 1] +
9095                               (vpd_data[i + 2] << 8)));
9096                         continue;
9097                 }
9098
9099                 if (val != 0x90)
9100                         goto out_not_found;
9101
9102                 block_end = (i + 3 +
9103                              (vpd_data[i + 1] +
9104                               (vpd_data[i + 2] << 8)));
9105                 i += 3;
9106                 while (i < block_end) {
9107                         if (vpd_data[i + 0] == 'P' &&
9108                             vpd_data[i + 1] == 'N') {
9109                                 int partno_len = vpd_data[i + 2];
9110
9111                                 if (partno_len > 24)
9112                                         goto out_not_found;
9113
9114                                 memcpy(tp->board_part_number,
9115                                        &vpd_data[i + 3],
9116                                        partno_len);
9117
9118                                 /* Success. */
9119                                 return;
9120                         }
                             /* Skip over this keyword's 3-byte header and
                              * its data so the scan can't loop forever.
                              */
                             i += 3 + vpd_data[i + 2];
9121                 }
9122
9123                 /* Part number not found. */
9124                 goto out_not_found;
9125         }
9126
9127 out_not_found:
9128         strcpy(tp->board_part_number, "none");
9129 }
9130
9131 #ifdef CONFIG_SPARC64
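     /* Identify Sun's onboard 570X parts by reading the PROM
      * subsystem-vendor-id property for this PCI device.
      */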
9132 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9133 {
9134         struct pci_dev *pdev = tp->pdev;
9135         struct pcidev_cookie *pcp = pdev->sysdata;
9136
9137         if (pcp != NULL) {
9138                 int node = pcp->prom_node;
9139                 u32 venid;
9140                 int err;
9141
9142                 err = prom_getproperty(node, "subsystem-vendor-id",
9143                                        (char *) &venid, sizeof(venid));
9144                 if (err == 0 || err == -1)
9145                         return 0;
9146                 if (venid == PCI_VENDOR_ID_SUN)
9147                         return 1;
9148         }
9149         return 0;
9150 }
9151 #endif
9152
9153 static int __devinit tg3_get_invariants(struct tg3 *tp)
9154 {
9155         static struct pci_device_id write_reorder_chipsets[] = {
9156                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9157                              PCI_DEVICE_ID_INTEL_82801AA_8) },
9158                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9159                              PCI_DEVICE_ID_INTEL_82801AB_8) },
9160                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9161                              PCI_DEVICE_ID_INTEL_82801BA_11) },
9162                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
9163                              PCI_DEVICE_ID_INTEL_82801BA_6) },
9164                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9165                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9166                 { },
9167         };
9168         u32 misc_ctrl_reg;
9169         u32 cacheline_sz_reg;
9170         u32 pci_state_reg, grc_misc_cfg;
9171         u32 val;
9172         u16 pci_cmd;
9173         int err;
9174
9175 #ifdef CONFIG_SPARC64
9176         if (tg3_is_sun_570X(tp))
9177                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9178 #endif
9179
9180         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
9181          * reordering to the mailbox registers done by the host
9182          * controller can cause major troubles.  We read back from
9183          * every mailbox register write to force the writes to be
9184          * posted to the chip in order.
9185          */
9186         if (pci_dev_present(write_reorder_chipsets))
9187                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9188
9189         /* Force memory write invalidate off.  If we leave it on,
9190          * then on 5700_BX chips we have to enable a workaround.
9191          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9192          * to match the cacheline size.  The Broadcom driver has this
9193          * workaround but turns MWI off all the time, so it never uses
9194          * it.  This seems to suggest that the workaround is insufficient.
9195          */
9196         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9197         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9198         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9199
9200         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9201          * has the register indirect write enable bit set before
9202          * we try to access any of the MMIO registers.  It is also
9203          * critical that the PCI-X hw workaround situation is decided
9204          * before that as well.
9205          */
9206         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9207                               &misc_ctrl_reg);
9208
9209         tp->pci_chip_rev_id = (misc_ctrl_reg >>
9210                                MISC_HOST_CTRL_CHIPREV_SHIFT);
9211
9212         /* Wrong chip ID in 5752 A0. This code can be removed later
9213          * as A0 is not in production.
9214          */
9215         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9216                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9217
9218         /* Find msi capability. */
9219         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9220                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
9221
9222         /* Initialize misc host control in PCI block. */
9223         tp->misc_host_ctrl |= (misc_ctrl_reg &
9224                                MISC_HOST_CTRL_CHIPREV);
9225         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9226                                tp->misc_host_ctrl);
9227
9228         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9229                               &cacheline_sz_reg);
9230
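             /* TG3PCI_CACHELINESZ is the standard PCI config dword at offset
              * 0x0c, which packs Cache Line Size, Latency Timer, Header Type
              * and BIST into one byte each; unpack the four fields for later use.
              */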
9231         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
9232         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
9233         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
9234         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
9235
9236         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
9237             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
9238             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9239                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9240
9241         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
9242             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
9243                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
9244
9245         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9246                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
9247
9248         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
9249             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
9250             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
9251                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
9252
9253         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9254                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9255
9256         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9257             tp->pci_lat_timer < 64) {
9258                 tp->pci_lat_timer = 64;
9259
9260                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
9261                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
9262                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
9263                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
9264
9265                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
9266                                        cacheline_sz_reg);
9267         }
9268
9269         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9270                               &pci_state_reg);
9271
9272         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
9273                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
9274
9275                 /* If this is a 5700 BX chipset, and we are in PCI-X
9276                  * mode, enable register write workaround.
9277                  *
9278                  * The workaround is to use indirect register accesses
9279                  * for all chip writes not to mailbox registers.
9280                  */
9281                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
9282                         u32 pm_reg;
9283                         u16 pci_cmd;
9284
9285                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9286
9287                         /* The chip can have its power management PCI config
9288                          * space registers clobbered due to this bug.
9289                          * So explicitly force the chip into D0 here.
9290                          */
9291                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9292                                               &pm_reg);
9293                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
9294                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9295                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
9296                                                pm_reg);
9297
9298                         /* Also, force SERR#/PERR# in PCI command. */
9299                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9300                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
9301                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9302                 }
9303         }
9304
9305         /* Back-to-back register writes can cause problems on this chip;
9306          * the workaround is to read back all register writes except those to
9307          * mailbox regs.  See tg3_write_indirect_reg32().
9308          *
9309          * PCI Express 5750_A0 rev chips need this workaround too.
9310          */
9311         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
9312             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
9313              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
9314                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
9315
9316         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
9317                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
9318         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
9319                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
9320
9321         /* Chip-specific fixup from Broadcom driver */
9322         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
9323             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
9324                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
9325                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
9326         }
9327
9328         /* Get eeprom hw config before calling tg3_set_power_state().
9329          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9330          * determined before calling tg3_set_power_state() so that
9331          * we know whether or not to switch out of Vaux power.
9332          * When the flag is set, it means that GPIO1 is used for eeprom
9333          * write protect and also implies that it is a LOM where GPIOs
9334          * are not used to switch power.
9335          */ 
9336         tg3_get_eeprom_hw_cfg(tp);
9337
9338         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9339          * GPIO1 driven high will bring 5700's external PHY out of reset.
9340          * It is also used as eeprom write protect on LOMs.
9341          */
9342         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
9343         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9344             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
9345                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9346                                        GRC_LCLCTRL_GPIO_OUTPUT1);
9347         /* Unused GPIO3 must be driven as output on 5752 because there
9348          * are no pull-up resistors on unused GPIO pins.
9349          */
9350         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9351                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
9352
9353         /* Force the chip into D0. */
9354         err = tg3_set_power_state(tp, 0);
9355         if (err) {
9356                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
9357                        pci_name(tp->pdev));
9358                 return err;
9359         }
9360
9361         /* 5700 B0 chips do not support checksumming correctly due
9362          * to hardware bugs.
9363          */
9364         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
9365                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
9366
9367         /* Pseudo-header checksum is done by hardware logic and not
9368          * the offload processors, so make the chip do the pseudo-
9369          * header checksums on receive.  For transmit it is more
9370          * convenient to do the pseudo-header checksum in software
9371          * as Linux does that on transmit for us in all cases.
9372          */
9373         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
9374         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
9375
9376         /* Derive initial jumbo mode from MTU assigned in
9377          * ether_setup() via the alloc_etherdev() call
9378          */
9379         if (tp->dev->mtu > ETH_DATA_LEN &&
9380             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780)
9381                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
9382
9383         /* Determine WakeOnLan speed to use. */
9384         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9385             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9386             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
9387             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
9388                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
9389         } else {
9390                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
9391         }
9392
9393         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
9394         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
9395             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
9396              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
9397              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
9398             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9399                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
9400
9401         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
9402             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
9403                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
9404         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
9405                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
9406
9407         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
9408                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
9409
9410         tp->coalesce_mode = 0;
9411         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
9412             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
9413                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
9414
9415         /* Initialize MAC MI mode, polling disabled. */
9416         tw32_f(MAC_MI_MODE, tp->mi_mode);
9417         udelay(80);
9418
9419         /* Initialize data/descriptor byte/word swapping. */
9420         val = tr32(GRC_MODE);
9421         val &= GRC_MODE_HOST_STACKUP;
9422         tw32(GRC_MODE, val | tp->grc_mode);
9423
9424         tg3_switch_clocks(tp);
9425
9426         /* Clear this out for sanity. */
9427         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9428
9429         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
9430                               &pci_state_reg);
9431         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
9432             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
9433                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
9434
9435                 if (chiprevid == CHIPREV_ID_5701_A0 ||
9436                     chiprevid == CHIPREV_ID_5701_B0 ||
9437                     chiprevid == CHIPREV_ID_5701_B2 ||
9438                     chiprevid == CHIPREV_ID_5701_B5) {
9439                         void __iomem *sram_base;
9440
9441                         /* Write some dummy words into the SRAM status block
9442                          * area and see if it reads back correctly.  If the return
9443                          * value is bad, force enable the PCIX workaround.
9444                          */
9445                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
9446
9447                         writel(0x00000000, sram_base);
9448                         writel(0x00000000, sram_base + 4);
9449                         writel(0xffffffff, sram_base + 4);
9450                         if (readl(sram_base) != 0x00000000)
9451                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
9452                 }
9453         }
9454
9455         udelay(50);
9456         tg3_nvram_init(tp);
9457
9458         grc_misc_cfg = tr32(GRC_MISC_CFG);
9459         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
9460
9461         /* Broadcom's driver says that CIOBE multisplit has a bug */
9462 #if 0
9463         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9464             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
9465                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
9466                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
9467         }
9468 #endif
9469         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9470             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
9471              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
9472                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
9473
9474         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9475             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
9476                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
9477         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
9478                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
9479                                       HOSTCC_MODE_CLRTICK_TXBD);
9480
9481                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
9482                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9483                                        tp->misc_host_ctrl);
9484         }
9485
9486         /* these are limited to 10/100 only */
9487         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9488              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
9489             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9490              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9491              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
9492               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
9493               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
9494             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
9495              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
9496               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
9497                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
9498
9499         err = tg3_phy_probe(tp);
9500         if (err) {
9501                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
9502                        pci_name(tp->pdev), err);
9503                 /* ... but do not return immediately ... */
9504         }
9505
9506         tg3_read_partno(tp);
9507
9508         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
9509                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9510         } else {
9511                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9512                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
9513                 else
9514                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
9515         }
9516
9517         /* 5700 {AX,BX} chips have a broken status block link
9518          * change bit implementation, so we must use the
9519          * status register in those cases.
9520          */
9521         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
9522                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
9523         else
9524                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
9525
9526         /* The led_ctrl is set during tg3_phy_probe; here we might
9527          * have to force the link status polling mechanism based
9528          * upon subsystem IDs.
9529          */
9530         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
9531             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9532                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
9533                                   TG3_FLAG_USE_LINKCHG_REG);
9534         }
9535
9536         /* For all SERDES we poll the MAC status register. */
9537         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9538                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
9539         else
9540                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
9541
9542         /* 5700 BX chips need to have their TX producer index mailboxes
9543          * written twice to work around a bug.
9544          */
9545         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
9546                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
9547         else
9548                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
9549
9550         /* It seems all chips can get confused if TX buffers
9551          * straddle the 4GB address boundary in some cases.
9552          */
9553         tp->dev->hard_start_xmit = tg3_start_xmit;
9554
9555         tp->rx_offset = 2;
9556         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
9557             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
9558                 tp->rx_offset = 0;
9559
9560         /* By default, disable wake-on-LAN.  The user can change this
9561          * using ETHTOOL_SWOL.
9562          */
9563         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9564
9565         return err;
9566 }
9567
9568 #ifdef CONFIG_SPARC64
9569 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
9570 {
9571         struct net_device *dev = tp->dev;
9572         struct pci_dev *pdev = tp->pdev;
9573         struct pcidev_cookie *pcp = pdev->sysdata;
9574
9575         if (pcp != NULL) {
9576                 int node = pcp->prom_node;
9577
9578                 if (prom_getproplen(node, "local-mac-address") == 6) {
9579                         prom_getproperty(node, "local-mac-address",
9580                                          dev->dev_addr, 6);
9581                         return 0;
9582                 }
9583         }
9584         return -ENODEV;
9585 }
9586
9587 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
9588 {
9589         struct net_device *dev = tp->dev;
9590
9591         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
9592         return 0;
9593 }
9594 #endif
9595
9596 static int __devinit tg3_get_device_address(struct tg3 *tp)
9597 {
9598         struct net_device *dev = tp->dev;
9599         u32 hi, lo, mac_offset;
9600
9601 #ifdef CONFIG_SPARC64
9602         if (!tg3_get_macaddr_sparc(tp))
9603                 return 0;
9604 #endif
9605
9606         mac_offset = 0x7c;
9607         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9608              !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) ||
9609             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
9610                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
9611                         mac_offset = 0xcc;
9612                 if (tg3_nvram_lock(tp))
9613                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
9614                 else
9615                         tg3_nvram_unlock(tp);
9616         }
9617
9618         /* First try to get it from MAC address mailbox. */
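             /* When firmware has populated a valid address, the upper 16 bits
              * of the high word hold the ASCII signature "HK" (0x484b); the
              * check below keys off that signature.
              */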
9619         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
9620         if ((hi >> 16) == 0x484b) {
9621                 dev->dev_addr[0] = (hi >>  8) & 0xff;
9622                 dev->dev_addr[1] = (hi >>  0) & 0xff;
9623
9624                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
9625                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9626                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9627                 dev->dev_addr[4] = (lo >>  8) & 0xff;
9628                 dev->dev_addr[5] = (lo >>  0) & 0xff;
9629         }
9630         /* Next, try NVRAM. */
9631         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
9632                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
9633                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
9634                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
9635                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
9636                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
9637                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
9638                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
9639                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
9640         }
9641         /* Finally just fetch it out of the MAC control regs. */
9642         else {
9643                 hi = tr32(MAC_ADDR_0_HIGH);
9644                 lo = tr32(MAC_ADDR_0_LOW);
9645
9646                 dev->dev_addr[5] = lo & 0xff;
9647                 dev->dev_addr[4] = (lo >> 8) & 0xff;
9648                 dev->dev_addr[3] = (lo >> 16) & 0xff;
9649                 dev->dev_addr[2] = (lo >> 24) & 0xff;
9650                 dev->dev_addr[1] = hi & 0xff;
9651                 dev->dev_addr[0] = (hi >> 8) & 0xff;
9652         }
9653
9654         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
9655 #ifdef CONFIG_SPARC64
9656                 if (!tg3_get_default_macaddr_sparc(tp))
9657                         return 0;
9658 #endif
9659                 return -EINVAL;
9660         }
9661         return 0;
9662 }
9663
9664 #define BOUNDARY_SINGLE_CACHELINE       1
9665 #define BOUNDARY_MULTI_CACHELINE        2
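     /* BOUNDARY_SINGLE_CACHELINE asks tg3_calc_dma_bndry() to stop DMA bursts
      * at every cache line; BOUNDARY_MULTI_CACHELINE allows bursts to span
      * multiple cache lines before the boundary logic breaks them up.  A goal
      * of zero returns the passed-in value unmodified.
      */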
9666
9667 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
9668 {
9669         int cacheline_size;
9670         u8 byte;
9671         int goal;
9672
9673         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
9674         if (byte == 0)
9675                 cacheline_size = 1024;
9676         else
9677                 cacheline_size = (int) byte * 4;
9678
9679         /* On 5703 and later chips, the boundary bits have no
9680          * effect.
9681          */
9682         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9683             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
9684             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9685                 goto out;
9686
9687 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
9688         goal = BOUNDARY_MULTI_CACHELINE;
9689 #else
9690 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
9691         goal = BOUNDARY_SINGLE_CACHELINE;
9692 #else
9693         goal = 0;
9694 #endif
9695 #endif
9696
9697         if (!goal)
9698                 goto out;
9699
9700         /* PCI controllers on most RISC systems tend to disconnect
9701          * when a device tries to burst across a cache-line boundary.
9702          * Therefore, letting tg3 do so just wastes PCI bandwidth.
9703          *
9704          * Unfortunately, for PCI-E there are only limited
9705          * write-side controls for this, and thus for reads
9706          * we will still get the disconnects.  We'll also waste
9707          * these PCI cycles for both read and write for chips
9708          * other than 5700 and 5701, which do not implement the
9709          * boundary bits.
9710          */
9711         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
9712             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
9713                 switch (cacheline_size) {
9714                 case 16:
9715                 case 32:
9716                 case 64:
9717                 case 128:
9718                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9719                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
9720                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
9721                         } else {
9722                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9723                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9724                         }
9725                         break;
9726
9727                 case 256:
9728                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
9729                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
9730                         break;
9731
9732                 default:
9733                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
9734                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
9735                         break;
9736                 }
9737         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9738                 switch (cacheline_size) {
9739                 case 16:
9740                 case 32:
9741                 case 64:
9742                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9743                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9744                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
9745                                 break;
9746                         }
9747                         /* fallthrough */
9748                 case 128:
9749                 default:
9750                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
9751                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
9752                         break;
9753                 }
9754         } else {
9755                 switch (cacheline_size) {
9756                 case 16:
9757                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9758                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
9759                                         DMA_RWCTRL_WRITE_BNDRY_16);
9760                                 break;
9761                         }
9762                         /* fallthrough */
9763                 case 32:
9764                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9765                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
9766                                         DMA_RWCTRL_WRITE_BNDRY_32);
9767                                 break;
9768                         }
9769                         /* fallthrough */
9770                 case 64:
9771                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9772                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
9773                                         DMA_RWCTRL_WRITE_BNDRY_64);
9774                                 break;
9775                         }
9776                         /* fallthrough */
9777                 case 128:
9778                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
9779                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
9780                                         DMA_RWCTRL_WRITE_BNDRY_128);
9781                                 break;
9782                         }
9783                         /* fallthrough */
9784                 case 256:
9785                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
9786                                 DMA_RWCTRL_WRITE_BNDRY_256);
9787                         break;
9788                 case 512:
9789                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
9790                                 DMA_RWCTRL_WRITE_BNDRY_512);
9791                         break;
9792                 case 1024:
9793                 default:
9794                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
9795                                 DMA_RWCTRL_WRITE_BNDRY_1024);
9796                         break;
9797                 }
9798         }
9799
9800 out:
9801         return val;
9802 }
9803
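     /* Run one transfer through the chip's internal DMA engines: a single
      * internal buffer descriptor is written into NIC SRAM through the PCI
      * memory-window registers, the read (host->NIC) or write (NIC->host)
      * DMA engine is enabled, and the corresponding completion FIFO is polled
      * for up to ~4ms.  Returns 0 on completion, -ENODEV on timeout.
      */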
9804 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
9805 {
9806         struct tg3_internal_buffer_desc test_desc;
9807         u32 sram_dma_descs;
9808         int i, ret;
9809
9810         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
9811
9812         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
9813         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
9814         tw32(RDMAC_STATUS, 0);
9815         tw32(WDMAC_STATUS, 0);
9816
9817         tw32(BUFMGR_MODE, 0);
9818         tw32(FTQ_RESET, 0);
9819
9820         test_desc.addr_hi = ((u64) buf_dma) >> 32;
9821         test_desc.addr_lo = buf_dma & 0xffffffff;
9822         test_desc.nic_mbuf = 0x00002100;
9823         test_desc.len = size;
9824
9825         /*
9826          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
9827          * the *second* time the tg3 driver was getting loaded after an
9828          * initial scan.
9829          *
9830          * Broadcom tells me:
9831          *   ...the DMA engine is connected to the GRC block and a DMA
9832          *   reset may affect the GRC block in some unpredictable way...
9833          *   The behavior of resets to individual blocks has not been tested.
9834          *
9835          * Broadcom noted the GRC reset will also reset all sub-components.
9836          */
9837         if (to_device) {
9838                 test_desc.cqid_sqid = (13 << 8) | 2;
9839
9840                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
9841                 udelay(40);
9842         } else {
9843                 test_desc.cqid_sqid = (16 << 8) | 7;
9844
9845                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
9846                 udelay(40);
9847         }
9848         test_desc.flags = 0x00000005;
9849
9850         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
9851                 u32 val;
9852
9853                 val = *(((u32 *)&test_desc) + i);
9854                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
9855                                        sram_dma_descs + (i * sizeof(u32)));
9856                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
9857         }
9858         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
9859
9860         if (to_device) {
9861                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
9862         } else {
9863                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
9864         }
9865
9866         ret = -ENODEV;
9867         for (i = 0; i < 40; i++) {
9868                 u32 val;
9869
9870                 if (to_device)
9871                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
9872                 else
9873                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
9874                 if ((val & 0xffff) == sram_dma_descs) {
9875                         ret = 0;
9876                         break;
9877                 }
9878
9879                 udelay(100);
9880         }
9881
9882         return ret;
9883 }
9884
9885 #define TEST_BUFFER_SIZE        0x2000
9886
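     /* Pick chip- and bus-specific DMA read/write watermarks and boundary
      * settings for TG3PCI_DMA_RW_CTRL, then (on 5700/5701 only) bounce a
      * test pattern host->NIC->host with the largest write burst allowed.
      * If the pattern comes back corrupted, the write boundary is tightened
      * to 16 bytes to work around the 5700/5701 write DMA bug.
      */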
9887 static int __devinit tg3_test_dma(struct tg3 *tp)
9888 {
9889         dma_addr_t buf_dma;
9890         u32 *buf, saved_dma_rwctrl;
9891         int ret;
9892
9893         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
9894         if (!buf) {
9895                 ret = -ENOMEM;
9896                 goto out_nofree;
9897         }
9898
9899         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
9900                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
9901
9902         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
9903
9904         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
9905                 /* DMA read watermark not used on PCIE */
9906                 tp->dma_rwctrl |= 0x00180000;
9907         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
9908                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
9909                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
9910                         tp->dma_rwctrl |= 0x003f0000;
9911                 else
9912                         tp->dma_rwctrl |= 0x003f000f;
9913         } else {
9914                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
9915                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9916                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
9917
9918                         if (ccval == 0x6 || ccval == 0x7)
9919                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
9920
9921                         /* Set bit 23 to enable PCIX hw bug fix */
9922                         tp->dma_rwctrl |= 0x009f0000;
9923                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
9924                         /* 5780 always in PCIX mode */
9925                         tp->dma_rwctrl |= 0x00144000;
9926                 } else {
9927                         tp->dma_rwctrl |= 0x001b000f;
9928                 }
9929         }
9930
9931         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
9932             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9933                 tp->dma_rwctrl &= 0xfffffff0;
9934
9935         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9936             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
9937                 /* Remove this if it causes problems for some boards. */
9938                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
9939
9940                 /* On 5700/5701 chips, we need to set this bit.
9941                  * Otherwise the chip will issue cacheline transactions
9942                  * to streamable DMA memory without all of the byte
9943                  * enables turned on.  This is an error on several
9944                  * RISC PCI controllers, in particular sparc64.
9945                  *
9946                  * On 5703/5704 chips, this bit has been reassigned
9947                  * a different meaning.  In particular, it is used
9948                  * on those chips to enable a PCI-X workaround.
9949                  */
9950                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
9951         }
9952
9953         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9954
9955 #if 0
9956         /* Unneeded, already done by tg3_get_invariants.  */
9957         tg3_switch_clocks(tp);
9958 #endif
9959
9960         ret = 0;
9961         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9962             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
9963                 goto out;
9964
9965         /* It is best to perform the DMA test with the maximum write burst size
9966          * to expose the 5700/5701 write DMA bug.
9967          */
9968         saved_dma_rwctrl = tp->dma_rwctrl;
9969         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
9970         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9971
9972         while (1) {
9973                 u32 *p = buf, i;
9974
9975                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
9976                         p[i] = i;
9977
9978                 /* Send the buffer to the chip. */
9979                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
9980                 if (ret) {
9981                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
9982                         break;
9983                 }
9984
9985 #if 0
9986                 /* validate data reached card RAM correctly. */
9987                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
9988                         u32 val;
9989                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
9990                         if (le32_to_cpu(val) != p[i]) {
9991                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
9992                                 /* ret = -ENODEV here? */
9993                         }
9994                         p[i] = 0;
9995                 }
9996 #endif
9997                 /* Now read it back. */
9998                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
9999                 if (ret) {
10000                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
10001
10002                         break;
10003                 }
10004
10005                 /* Verify it. */
10006                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10007                         if (p[i] == i)
10008                                 continue;
10009
10010                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10011                             DMA_RWCTRL_WRITE_BNDRY_16) {
10012                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10013                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10014                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10015                                 break;
10016                         } else {
10017                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10018                                 ret = -ENODEV;
10019                                 goto out;
10020                         }
10021                 }
10022
10023                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10024                         /* Success. */
10025                         ret = 0;
10026                         break;
10027                 }
10028         }
10029         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10030             DMA_RWCTRL_WRITE_BNDRY_16) {
10031                 static struct pci_device_id dma_wait_state_chipsets[] = {
10032                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10033                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10034                         { },
10035                 };
10036
10037                 /* DMA test passed without adjusting the DMA boundary;
10038                  * now look for chipsets that are known to expose the
10039                  * DMA bug without failing the test.
10040                  */
10041                 if (pci_dev_present(dma_wait_state_chipsets)) {
10042                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10043                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10044                 }
10045                 else
10046                         /* Safe to use the calculated DMA boundary. */
10047                         tp->dma_rwctrl = saved_dma_rwctrl;
10048
10049                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10050         }
10051
10052 out:
10053         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
10054 out_nofree:
10055         return ret;
10056 }
10057
10058 static void __devinit tg3_init_link_config(struct tg3 *tp)
10059 {
10060         tp->link_config.advertising =
10061                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10062                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10063                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
10064                  ADVERTISED_Autoneg | ADVERTISED_MII);
10065         tp->link_config.speed = SPEED_INVALID;
10066         tp->link_config.duplex = DUPLEX_INVALID;
10067         tp->link_config.autoneg = AUTONEG_ENABLE;
10068         netif_carrier_off(tp->dev);
10069         tp->link_config.active_speed = SPEED_INVALID;
10070         tp->link_config.active_duplex = DUPLEX_INVALID;
10071         tp->link_config.phy_is_low_power = 0;
10072         tp->link_config.orig_speed = SPEED_INVALID;
10073         tp->link_config.orig_duplex = DUPLEX_INVALID;
10074         tp->link_config.orig_autoneg = AUTONEG_INVALID;
10075 }
10076
10077 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
10078 {
10079         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10080                 tp->bufmgr_config.mbuf_read_dma_low_water =
10081                         DEFAULT_MB_RDMA_LOW_WATER_5705;
10082                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10083                         DEFAULT_MB_MACRX_LOW_WATER_5705;
10084                 tp->bufmgr_config.mbuf_high_water =
10085                         DEFAULT_MB_HIGH_WATER_5705;
10086
10087                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10088                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
10089                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10090                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
10091                 tp->bufmgr_config.mbuf_high_water_jumbo =
10092                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
10093         } else {
10094                 tp->bufmgr_config.mbuf_read_dma_low_water =
10095                         DEFAULT_MB_RDMA_LOW_WATER;
10096                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10097                         DEFAULT_MB_MACRX_LOW_WATER;
10098                 tp->bufmgr_config.mbuf_high_water =
10099                         DEFAULT_MB_HIGH_WATER;
10100
10101                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10102                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
10103                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10104                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
10105                 tp->bufmgr_config.mbuf_high_water_jumbo =
10106                         DEFAULT_MB_HIGH_WATER_JUMBO;
10107         }
10108
10109         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
10110         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
10111 }
10112
10113 static char * __devinit tg3_phy_string(struct tg3 *tp)
10114 {
10115         switch (tp->phy_id & PHY_ID_MASK) {
10116         case PHY_ID_BCM5400:    return "5400";
10117         case PHY_ID_BCM5401:    return "5401";
10118         case PHY_ID_BCM5411:    return "5411";
10119         case PHY_ID_BCM5701:    return "5701";
10120         case PHY_ID_BCM5703:    return "5703";
10121         case PHY_ID_BCM5704:    return "5704";
10122         case PHY_ID_BCM5705:    return "5705";
10123         case PHY_ID_BCM5750:    return "5750";
10124         case PHY_ID_BCM5752:    return "5752";
10125         case PHY_ID_BCM5780:    return "5780";
10126         case PHY_ID_BCM8002:    return "8002/serdes";
10127         case 0:                 return "serdes";
10128         default:                return "unknown";
10129         }
10130 }
10131
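      /* The 5704 is a dual-port device whose two MACs appear as two PCI
       * functions in the same slot.  Scan the other functions of our devfn
       * for the sibling port so the driver can coordinate the two ports
       * where needed.
       */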
10132 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
10133 {
10134         struct pci_dev *peer;
10135         unsigned int func, devnr = tp->pdev->devfn & ~7;
10136
10137         for (func = 0; func < 8; func++) {
10138                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
10139                 if (peer && peer != tp->pdev)
10140                         break;
10141                 pci_dev_put(peer);
10142         }
10143         if (!peer || peer == tp->pdev)
10144                 BUG();
10145
10146         /*
10147          * We don't need to keep the refcount elevated; there's no way
10148          * to remove one half of this device without removing the other
10149          */
10150         pci_dev_put(peer);
10151
10152         return peer;
10153 }
10154
10155 static void __devinit tg3_init_coal(struct tg3 *tp)
10156 {
10157         struct ethtool_coalesce *ec = &tp->coal;
10158
10159         memset(ec, 0, sizeof(*ec));
10160         ec->cmd = ETHTOOL_GCOALESCE;
10161         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
10162         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
10163         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
10164         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
10165         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
10166         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
10167         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
10168         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
10169         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
10170
10171         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
10172                                  HOSTCC_MODE_CLRTICK_TXBD)) {
10173                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
10174                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
10175                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
10176                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
10177         }
10178
10179         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10180                 ec->rx_coalesce_usecs_irq = 0;
10181                 ec->tx_coalesce_usecs_irq = 0;
10182                 ec->stats_block_coalesce_usecs = 0;
10183         }
10184 }
10185
10186 static int __devinit tg3_init_one(struct pci_dev *pdev,
10187                                   const struct pci_device_id *ent)
10188 {
10189         static int tg3_version_printed = 0;
10190         unsigned long tg3reg_base, tg3reg_len;
10191         struct net_device *dev;
10192         struct tg3 *tp;
10193         int i, err, pci_using_dac, pm_cap;
10194
10195         if (tg3_version_printed++ == 0)
10196                 printk(KERN_INFO "%s", version);
10197
10198         err = pci_enable_device(pdev);
10199         if (err) {
10200                 printk(KERN_ERR PFX "Cannot enable PCI device, "
10201                        "aborting.\n");
10202                 return err;
10203         }
10204
10205         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10206                 printk(KERN_ERR PFX "Cannot find proper PCI device "
10207                        "base address, aborting.\n");
10208                 err = -ENODEV;
10209                 goto err_out_disable_pdev;
10210         }
10211
10212         err = pci_request_regions(pdev, DRV_MODULE_NAME);
10213         if (err) {
10214                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
10215                        "aborting.\n");
10216                 goto err_out_disable_pdev;
10217         }
10218
10219         pci_set_master(pdev);
10220
10221         /* Find power-management capability. */
10222         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10223         if (pm_cap == 0) {
10224                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
10225                        "aborting.\n");
10226                 err = -EIO;
10227                 goto err_out_free_res;
10228         }
10229
10230         /* Configure DMA attributes. */
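              /* Try a full 64-bit DMA mask first; if the platform supports it
               * we can set NETIF_F_HIGHDMA below, otherwise fall back to a
               * 32-bit mask.
               */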
10231         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
10232         if (!err) {
10233                 pci_using_dac = 1;
10234                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
10235                 if (err < 0) {
10236                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
10237                                "for consistent allocations\n");
10238                         goto err_out_free_res;
10239                 }
10240         } else {
10241                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
10242                 if (err) {
10243                         printk(KERN_ERR PFX "No usable DMA configuration, "
10244                                "aborting.\n");
10245                         goto err_out_free_res;
10246                 }
10247                 pci_using_dac = 0;
10248         }
10249
10250         tg3reg_base = pci_resource_start(pdev, 0);
10251         tg3reg_len = pci_resource_len(pdev, 0);
10252
10253         dev = alloc_etherdev(sizeof(*tp));
10254         if (!dev) {
10255                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
10256                 err = -ENOMEM;
10257                 goto err_out_free_res;
10258         }
10259
10260         SET_MODULE_OWNER(dev);
10261         SET_NETDEV_DEV(dev, &pdev->dev);
10262
10263         if (pci_using_dac)
10264                 dev->features |= NETIF_F_HIGHDMA;
10265         dev->features |= NETIF_F_LLTX;
10266 #if TG3_VLAN_TAG_USED
10267         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
10268         dev->vlan_rx_register = tg3_vlan_rx_register;
10269         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
10270 #endif
10271
10272         tp = netdev_priv(dev);
10273         tp->pdev = pdev;
10274         tp->dev = dev;
10275         tp->pm_cap = pm_cap;
10276         tp->mac_mode = TG3_DEF_MAC_MODE;
10277         tp->rx_mode = TG3_DEF_RX_MODE;
10278         tp->tx_mode = TG3_DEF_TX_MODE;
10279         tp->mi_mode = MAC_MI_MODE_BASE;
10280         if (tg3_debug > 0)
10281                 tp->msg_enable = tg3_debug;
10282         else
10283                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
10284
10285         /* The word/byte swap controls here control register access byte
10286          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
10287          * setting below.
10288          */
10289         tp->misc_host_ctrl =
10290                 MISC_HOST_CTRL_MASK_PCI_INT |
10291                 MISC_HOST_CTRL_WORD_SWAP |
10292                 MISC_HOST_CTRL_INDIR_ACCESS |
10293                 MISC_HOST_CTRL_PCISTATE_RW;
10294
10295         /* The NONFRM (non-frame) byte/word swap controls take effect
10296          * on descriptor entries, anything which isn't packet data.
10297          *
10298          * The StrongARM chips on the board (one for tx, one for rx)
10299          * are running in big-endian mode.
10300          */
10301         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
10302                         GRC_MODE_WSWAP_NONFRM_DATA);
10303 #ifdef __BIG_ENDIAN
10304         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
10305 #endif
10306         spin_lock_init(&tp->lock);
10307         spin_lock_init(&tp->tx_lock);
10308         spin_lock_init(&tp->indirect_lock);
10309         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
10310
10311         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
10312         if (!tp->regs) {
10313                 printk(KERN_ERR PFX "Cannot map device registers, "
10314                        "aborting.\n");
10315                 err = -ENOMEM;
10316                 goto err_out_free_dev;
10317         }
10318
10319         tg3_init_link_config(tp);
10320
10321         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
10322         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
10323         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
10324
10325         dev->open = tg3_open;
10326         dev->stop = tg3_close;
10327         dev->get_stats = tg3_get_stats;
10328         dev->set_multicast_list = tg3_set_rx_mode;
10329         dev->set_mac_address = tg3_set_mac_addr;
10330         dev->do_ioctl = tg3_ioctl;
10331         dev->tx_timeout = tg3_tx_timeout;
10332         dev->poll = tg3_poll;
10333         dev->ethtool_ops = &tg3_ethtool_ops;
10334         dev->weight = 64;
10335         dev->watchdog_timeo = TG3_TX_TIMEOUT;
10336         dev->change_mtu = tg3_change_mtu;
10337         dev->irq = pdev->irq;
10338 #ifdef CONFIG_NET_POLL_CONTROLLER
10339         dev->poll_controller = tg3_poll_controller;
10340 #endif
10341
10342         err = tg3_get_invariants(tp);
10343         if (err) {
10344                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
10345                        "aborting.\n");
10346                 goto err_out_iounmap;
10347         }
10348
10349         tg3_init_bufmgr_config(tp);
10350
10351 #if TG3_TSO_SUPPORT != 0
10352         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
10353                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10354         }
10355         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10356             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10357             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
10358             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
10359                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
10360         } else {
10361                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
10362         }
10363
10364         /* TSO is off by default, user can enable using ethtool.  */
10365 #if 0
10366         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
10367                 dev->features |= NETIF_F_TSO;
10368 #endif
10369
10370 #endif
10371
10372         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
10373             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
10374             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
10375                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
10376                 tp->rx_pending = 63;
10377         }
10378
10379         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10380                 tp->pdev_peer = tg3_find_5704_peer(tp);
10381
10382         err = tg3_get_device_address(tp);
10383         if (err) {
10384                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
10385                        "aborting.\n");
10386                 goto err_out_iounmap;
10387         }
10388
10389         /*
10390          * Reset the chip in case the UNDI or EFI driver did not shut it down;
10391          * the DMA self test will enable the WDMAC and we'll see (spurious)
10392          * pending DMA on the PCI bus at that point.
10393          */
10394         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
10395             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10396                 pci_save_state(tp->pdev);
10397                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
10398                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10399         }
10400
10401         err = tg3_test_dma(tp);
10402         if (err) {
10403                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
10404                 goto err_out_iounmap;
10405         }
10406
10407         /* Tigon3 can do IPv4 only... and some chips have buggy
10408          * checksumming.
10409          */
10410         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
10411                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
10412                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10413         } else
10414                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10415
        if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
                dev->features &= ~NETIF_F_HIGHDMA;

        /* Flow control autonegotiation is the default behavior. */
        tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

        tg3_init_coal(tp);

        /* Now that we have fully set up the chip, save away a snapshot
         * of the PCI config space.  We need to restore this after
         * GRC_MISC_CFG core clock resets and some resume events.
         */
        pci_save_state(tp->pdev);

        err = register_netdev(dev);
        if (err) {
                printk(KERN_ERR PFX "Cannot register net device, "
                       "aborting.\n");
                goto err_out_iounmap;
        }

        pci_set_drvdata(pdev, dev);

        printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
               dev->name,
               tp->board_part_number,
               tp->pci_chip_rev_id,
               tg3_phy_string(tp),
               ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
               ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
               ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
               (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");

        for (i = 0; i < 6; i++)
                printk("%2.2x%c", dev->dev_addr[i],
                       i == 5 ? '\n' : ':');

        printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
               "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
               "TSOcap[%d]\n",
               dev->name,
               (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
               (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
               (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
               (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
               (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
               (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
               (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
        printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
               dev->name, tp->dma_rwctrl);

        return 0;

err_out_iounmap:
        iounmap(tp->regs);

err_out_free_dev:
        free_netdev(dev);

err_out_free_res:
        pci_release_regions(pdev);

err_out_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

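        /* Tear down everything tg3_init_one() set up. */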
        if (dev) {
                struct tg3 *tp = netdev_priv(dev);

                unregister_netdev(dev);
                iounmap(tp->regs);
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
        }
}

static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (!netif_running(dev))
                return 0;

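        /* Quiesce the device: stop the queues and NAPI polling, kill the
         * periodic timer, mask interrupts and halt the chip before
         * changing the power state.
         */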
        tg3_netif_stop(tp);

        del_timer_sync(&tp->timer);

        tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
        tg3_full_unlock(tp);

        netif_device_detach(dev);

        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_full_unlock(tp);

        err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
        if (err) {
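                /* Could not enter the requested low-power state; bring
                 * the device back up so it stays usable.
                 */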
                tg3_full_lock(tp, 0);

                tg3_init_hw(tp);

                tp->timer.expires = jiffies + tp->timer_offset;
                add_timer(&tp->timer);

                netif_device_attach(dev);
                tg3_netif_start(tp);

                tg3_full_unlock(tp);
        }

        return err;
}

static int tg3_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (!netif_running(dev))
                return 0;

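        /* Restore the saved PCI config space and bring the chip back to
         * full power (D0) before re-initializing the hardware.
         */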
        pci_restore_state(tp->pdev);

        err = tg3_set_power_state(tp, 0);
        if (err)
                return err;

        netif_device_attach(dev);

        tg3_full_lock(tp, 0);

        tg3_init_hw(tp);

        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);

        tg3_netif_start(tp);

        tg3_full_unlock(tp);

        return 0;
}

static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = __devexit_p(tg3_remove_one),
        .suspend        = tg3_suspend,
        .resume         = tg3_resume,
};

static int __init tg3_init(void)
{
        return pci_module_init(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
        pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);