1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mii.h>
36 #include <linux/if_vlan.h>
37 #include <linux/ip.h>
38 #include <linux/tcp.h>
39 #include <linux/workqueue.h>
40 #include <linux/prefetch.h>
41 #include <linux/dma-mapping.h>
42
43 #include <net/checksum.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC64
51 #include <asm/idprom.h>
52 #include <asm/oplib.h>
53 #include <asm/pbm.h>
54 #endif
55
56 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
57 #define TG3_VLAN_TAG_USED 1
58 #else
59 #define TG3_VLAN_TAG_USED 0
60 #endif
61
62 #ifdef NETIF_F_TSO
63 #define TG3_TSO_SUPPORT 1
64 #else
65 #define TG3_TSO_SUPPORT 0
66 #endif
67
68 #include "tg3.h"
69
70 #define DRV_MODULE_NAME         "tg3"
71 #define PFX DRV_MODULE_NAME     ": "
72 #define DRV_MODULE_VERSION      "3.52"
73 #define DRV_MODULE_RELDATE      "Mar 06, 2006"
74
75 #define TG3_DEF_MAC_MODE        0
76 #define TG3_DEF_RX_MODE         0
77 #define TG3_DEF_TX_MODE         0
78 #define TG3_DEF_MSG_ENABLE        \
79         (NETIF_MSG_DRV          | \
80          NETIF_MSG_PROBE        | \
81          NETIF_MSG_LINK         | \
82          NETIF_MSG_TIMER        | \
83          NETIF_MSG_IFDOWN       | \
84          NETIF_MSG_IFUP         | \
85          NETIF_MSG_RX_ERR       | \
86          NETIF_MSG_TX_ERR)
87
88 /* length of time before we decide the hardware is borked,
89  * and dev->tx_timeout() should be called to fix the problem
90  */
91 #define TG3_TX_TIMEOUT                  (5 * HZ)
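/* Illustrative sketch, not a quote from this excerpt: tg3_init_one() further
 * down the driver hands this value to the networking core so the watchdog
 * knows when to invoke the driver's tx_timeout handler, roughly:
 *
 *        dev->tx_timeout = tg3_tx_timeout;
 *        dev->watchdog_timeo = TG3_TX_TIMEOUT;
 */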
92
93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU                     60
95 #define TG3_MAX_MTU(tp) \
96         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
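/* Illustrative sketch (assumed shape of the check; the real one lives in the
 * driver's change_mtu handler later in the file): these two bounds gate
 * user-requested MTU changes, e.g.
 *
 *        if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
 *                return -EINVAL;
 */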
97
98 /* These numbers seem to be hard coded in the NIC firmware somehow.
99  * You can't change the ring sizes, but you can change where you place
100  * them in the NIC onboard memory.
101  */
102 #define TG3_RX_RING_SIZE                512
103 #define TG3_DEF_RX_RING_PENDING         200
104 #define TG3_RX_JUMBO_RING_SIZE          256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
106
107 /* Do not place this n-ring entries value into the tp struct itself;
108  * we really want to expose these constants to GCC so that modulo et
109  * al.  operations are done with shifts and masks instead of with
110  * hw multiply/modulo instructions.  Another solution would be to
111  * replace things like '% foo' with '& (foo - 1)'.
112  */
113 #define TG3_RX_RCB_RING_SIZE(tp)        \
114         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
115
116 #define TG3_TX_RING_SIZE                512
117 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
118
119 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
120                                  TG3_RX_RING_SIZE)
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122                                  TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124                                    TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
126                                  TG3_TX_RING_SIZE)
127 #define TX_BUFFS_AVAIL(TP)                                              \
128         ((TP)->tx_pending -                                             \
129          (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
130 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
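/* Worked example of the mask trick described in the comment above: because
 * TG3_TX_RING_SIZE is a compile-time power of two (512), wrapping an index
 * needs only a bitwise AND instead of a modulo, e.g.
 *
 *        NEXT_TX(511) = (511 + 1) & (512 - 1) = 512 & 511 = 0
 *
 * which is equivalent to (511 + 1) % 512 but never costs a hardware divide.
 * TX_BUFFS_AVAIL() relies on the same property for its producer/consumer
 * difference.
 */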
131
132 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
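/* Rough accounting for the 1536 above (a plausible reading, not stated in
 * the driver): 1500-byte MTU + 14-byte Ethernet header + 4-byte VLAN tag +
 * 4-byte FCS = 1522 bytes, rounded up to 1536; tp->rx_offset plus the extra
 * 64 bytes leave headroom for DMA and IP-header alignment.
 */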
134
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
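/* Illustrative use of the threshold (mirroring the check made in the TX
 * completion path further down the file):
 *
 *        if (netif_queue_stopped(tp->dev) &&
 *            TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
 *                netif_wake_queue(tp->dev);
 */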
137
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
141 #define TG3_NUM_TEST            6
142
143 static char version[] __devinitdata =
144         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION);
150
151 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
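/* Example of setting the bitmap at load time (assumed typical usage, not
 * taken from this file):
 *
 *        modprobe tg3 tg3_debug=0x00ff
 *
 * The bits correspond to the NETIF_MSG_* values or'ed together in
 * TG3_DEF_MSG_ENABLE above; leaving the default of -1 keeps that mask.
 */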
154
155 static struct pci_device_id tg3_pci_tbl[] = {
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
241           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
243           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
245           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
246         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
247           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
248         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
249           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
250         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
251           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
252         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
253           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
254         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
255           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
256         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
257           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
258         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
259           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
260         { 0, }
261 };
262
263 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
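/* Exporting the table gives each entry a module alias so hotplug tools can
 * map a PCI ID to this driver; for example (assumed output format) the 5700
 * entry becomes something like
 *
 *        alias: pci:v000014E4d00001644sv*sd*bc*sc*i*
 *
 * visible via "modinfo tg3" when built as a module.
 */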
264
265 static struct {
266         const char string[ETH_GSTRING_LEN];
267 } ethtool_stats_keys[TG3_NUM_STATS] = {
268         { "rx_octets" },
269         { "rx_fragments" },
270         { "rx_ucast_packets" },
271         { "rx_mcast_packets" },
272         { "rx_bcast_packets" },
273         { "rx_fcs_errors" },
274         { "rx_align_errors" },
275         { "rx_xon_pause_rcvd" },
276         { "rx_xoff_pause_rcvd" },
277         { "rx_mac_ctrl_rcvd" },
278         { "rx_xoff_entered" },
279         { "rx_frame_too_long_errors" },
280         { "rx_jabbers" },
281         { "rx_undersize_packets" },
282         { "rx_in_length_errors" },
283         { "rx_out_length_errors" },
284         { "rx_64_or_less_octet_packets" },
285         { "rx_65_to_127_octet_packets" },
286         { "rx_128_to_255_octet_packets" },
287         { "rx_256_to_511_octet_packets" },
288         { "rx_512_to_1023_octet_packets" },
289         { "rx_1024_to_1522_octet_packets" },
290         { "rx_1523_to_2047_octet_packets" },
291         { "rx_2048_to_4095_octet_packets" },
292         { "rx_4096_to_8191_octet_packets" },
293         { "rx_8192_to_9022_octet_packets" },
294
295         { "tx_octets" },
296         { "tx_collisions" },
297
298         { "tx_xon_sent" },
299         { "tx_xoff_sent" },
300         { "tx_flow_control" },
301         { "tx_mac_errors" },
302         { "tx_single_collisions" },
303         { "tx_mult_collisions" },
304         { "tx_deferred" },
305         { "tx_excessive_collisions" },
306         { "tx_late_collisions" },
307         { "tx_collide_2times" },
308         { "tx_collide_3times" },
309         { "tx_collide_4times" },
310         { "tx_collide_5times" },
311         { "tx_collide_6times" },
312         { "tx_collide_7times" },
313         { "tx_collide_8times" },
314         { "tx_collide_9times" },
315         { "tx_collide_10times" },
316         { "tx_collide_11times" },
317         { "tx_collide_12times" },
318         { "tx_collide_13times" },
319         { "tx_collide_14times" },
320         { "tx_collide_15times" },
321         { "tx_ucast_packets" },
322         { "tx_mcast_packets" },
323         { "tx_bcast_packets" },
324         { "tx_carrier_sense_errors" },
325         { "tx_discards" },
326         { "tx_errors" },
327
328         { "dma_writeq_full" },
329         { "dma_write_prioq_full" },
330         { "rxbds_empty" },
331         { "rx_discards" },
332         { "rx_errors" },
333         { "rx_threshold_hit" },
334
335         { "dma_readq_full" },
336         { "dma_read_prioq_full" },
337         { "tx_comp_queue_full" },
338
339         { "ring_set_send_prod_index" },
340         { "ring_status_update" },
341         { "nic_irqs" },
342         { "nic_avoided_irqs" },
343         { "nic_tx_threshold_hit" }
344 };
345
346 static struct {
347         const char string[ETH_GSTRING_LEN];
348 } ethtool_test_keys[TG3_NUM_TEST] = {
349         { "nvram test     (online) " },
350         { "link test      (online) " },
351         { "register test  (offline)" },
352         { "memory test    (offline)" },
353         { "loopback test  (offline)" },
354         { "interrupt test (offline)" },
355 };
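/* These two string tables are what userspace sees: assuming a standard
 * ethtool binary and an interface named eth0, the stats keys label the
 * counters reported by "ethtool -S eth0" and the test keys label the
 * results of "ethtool -t eth0 offline" (or the online subset).
 */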
356
357 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
358 {
359         writel(val, tp->regs + off);
360 }
361
362 static u32 tg3_read32(struct tg3 *tp, u32 off)
363 {
364         return readl(tp->regs + off);
365 }
366
367 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
368 {
369         unsigned long flags;
370
371         spin_lock_irqsave(&tp->indirect_lock, flags);
372         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
373         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
374         spin_unlock_irqrestore(&tp->indirect_lock, flags);
375 }
376
377 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
378 {
379         writel(val, tp->regs + off);
380         readl(tp->regs + off);
381 }
382
383 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
384 {
385         unsigned long flags;
386         u32 val;
387
388         spin_lock_irqsave(&tp->indirect_lock, flags);
389         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
390         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
391         spin_unlock_irqrestore(&tp->indirect_lock, flags);
392         return val;
393 }
394
395 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
396 {
397         unsigned long flags;
398
399         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
400                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
401                                        TG3_64BIT_REG_LOW, val);
402                 return;
403         }
404         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
405                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
406                                        TG3_64BIT_REG_LOW, val);
407                 return;
408         }
409
410         spin_lock_irqsave(&tp->indirect_lock, flags);
411         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
412         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
413         spin_unlock_irqrestore(&tp->indirect_lock, flags);
414
415         /* In indirect mode when disabling interrupts, we also need
416          * to clear the interrupt bit in the GRC local ctrl register.
417          */
418         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
419             (val == 0x1)) {
420                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
421                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
422         }
423 }
424
425 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
426 {
427         unsigned long flags;
428         u32 val;
429
430         spin_lock_irqsave(&tp->indirect_lock, flags);
431         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
432         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
433         spin_unlock_irqrestore(&tp->indirect_lock, flags);
434         return val;
435 }
436
437 /* usec_wait specifies the wait time in usec when writing to certain registers
438  * where it is unsafe to read back the register without some delay.
439  * GRC_LOCAL_CTRL is one example, used when the GPIOs are toggled to switch power.
440  * TG3PCI_CLOCK_CTRL is another, used when the clock frequencies are changed.
441  */
442 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
443 {
444         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
445             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
446                 /* Non-posted methods */
447                 tp->write32(tp, off, val);
448         else {
449                 /* Posted method */
450                 tg3_write32(tp, off, val);
451                 if (usec_wait)
452                         udelay(usec_wait);
453                 tp->read32(tp, off);
454         }
455         /* Wait again after the read for the posted method to guarantee that
456          * the wait time is met.
457          */
458         if (usec_wait)
459                 udelay(usec_wait);
460 }
461
462 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
463 {
464         tp->write32_mbox(tp, off, val);
465         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
466             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
467                 tp->read32_mbox(tp, off);
468 }
469
470 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
471 {
472         void __iomem *mbox = tp->regs + off;
473         writel(val, mbox);
474         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
475                 writel(val, mbox);
476         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
477                 readl(mbox);
478 }
479
480 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
481 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
482 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
483 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
484 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
485
486 #define tw32(reg,val)           tp->write32(tp, reg, val)
487 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
488 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
489 #define tr32(reg)               tp->read32(tp, reg)
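/* Illustrative calls, mirroring how these helpers are used elsewhere in the
 * driver:
 *
 *        tw32(MAC_MODE, val);                      plain (possibly posted) write
 *        tw32_f(MAC_MODE, val);                    write, then read back to flush
 *        tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40);  flush plus a 40 usec settle time
 *
 * The _wait_f form is the one needed for GRC_LOCAL_CTRL and
 * TG3PCI_CLOCK_CTRL, as explained in the comment above _tw32_flush().
 */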
490
491 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
492 {
493         unsigned long flags;
494
495         spin_lock_irqsave(&tp->indirect_lock, flags);
496         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
497         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
498
499         /* Always leave this as zero. */
500         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
501         spin_unlock_irqrestore(&tp->indirect_lock, flags);
502 }
503
504 static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
505 {
506         /* If no workaround is needed, write to mem space directly */
507         if (tp->write32 != tg3_write_indirect_reg32)
508                 tw32(NIC_SRAM_WIN_BASE + off, val);
509         else
510                 tg3_write_mem(tp, off, val);
511 }
512
513 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
514 {
515         unsigned long flags;
516
517         spin_lock_irqsave(&tp->indirect_lock, flags);
518         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
519         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
520
521         /* Always leave this as zero. */
522         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
523         spin_unlock_irqrestore(&tp->indirect_lock, flags);
524 }
525
526 static void tg3_disable_ints(struct tg3 *tp)
527 {
528         tw32(TG3PCI_MISC_HOST_CTRL,
529              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
530         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
531 }
532
533 static inline void tg3_cond_int(struct tg3 *tp)
534 {
535         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
536             (tp->hw_status->status & SD_STATUS_UPDATED))
537                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
538 }
539
540 static void tg3_enable_ints(struct tg3 *tp)
541 {
542         tp->irq_sync = 0;
543         wmb();
544
545         tw32(TG3PCI_MISC_HOST_CTRL,
546              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
547         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
548                        (tp->last_tag << 24));
549         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
550                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
551                                (tp->last_tag << 24));
552         tg3_cond_int(tp);
553 }
554
555 static inline unsigned int tg3_has_work(struct tg3 *tp)
556 {
557         struct tg3_hw_status *sblk = tp->hw_status;
558         unsigned int work_exists = 0;
559
560         /* check for phy events */
561         if (!(tp->tg3_flags &
562               (TG3_FLAG_USE_LINKCHG_REG |
563                TG3_FLAG_POLL_SERDES))) {
564                 if (sblk->status & SD_STATUS_LINK_CHG)
565                         work_exists = 1;
566         }
567         /* check for RX/TX work to do */
568         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
569             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
570                 work_exists = 1;
571
572         return work_exists;
573 }
574
575 /* tg3_restart_ints
576  *  Similar to tg3_enable_ints, but it accurately determines whether there
577  *  is new work pending and can return without flushing the PIO write
578  *  which re-enables interrupts.
579  */
580 static void tg3_restart_ints(struct tg3 *tp)
581 {
582         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
583                      tp->last_tag << 24);
584         mmiowb();
585
586         /* When doing tagged status, this work check is unnecessary.
587          * The last_tag we write above tells the chip which piece of
588          * work we've completed.
589          */
590         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
591             tg3_has_work(tp))
592                 tw32(HOSTCC_MODE, tp->coalesce_mode |
593                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
594 }
595
596 static inline void tg3_netif_stop(struct tg3 *tp)
597 {
598         tp->dev->trans_start = jiffies; /* prevent tx timeout */
599         netif_poll_disable(tp->dev);
600         netif_tx_disable(tp->dev);
601 }
602
603 static inline void tg3_netif_start(struct tg3 *tp)
604 {
605         netif_wake_queue(tp->dev);
606         /* NOTE: unconditional netif_wake_queue is only appropriate
607          * so long as all callers are assured to have free tx slots
608          * (such as after tg3_init_hw)
609          */
610         netif_poll_enable(tp->dev);
611         tp->hw_status->status |= SD_STATUS_UPDATED;
612         tg3_enable_ints(tp);
613 }
614
615 static void tg3_switch_clocks(struct tg3 *tp)
616 {
617         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
618         u32 orig_clock_ctrl;
619
620         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
621                 return;
622
623         orig_clock_ctrl = clock_ctrl;
624         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
625                        CLOCK_CTRL_CLKRUN_OENABLE |
626                        0x1f);
627         tp->pci_clock_ctrl = clock_ctrl;
628
629         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
630                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
631                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
632                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
633                 }
634         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
635                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
636                             clock_ctrl |
637                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
638                             40);
639                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
640                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
641                             40);
642         }
643         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
644 }
645
646 #define PHY_BUSY_LOOPS  5000
647
648 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
649 {
650         u32 frame_val;
651         unsigned int loops;
652         int ret;
653
654         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
655                 tw32_f(MAC_MI_MODE,
656                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
657                 udelay(80);
658         }
659
660         *val = 0x0;
661
662         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
663                       MI_COM_PHY_ADDR_MASK);
664         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
665                       MI_COM_REG_ADDR_MASK);
666         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
667         
668         tw32_f(MAC_MI_COM, frame_val);
669
670         loops = PHY_BUSY_LOOPS;
671         while (loops != 0) {
672                 udelay(10);
673                 frame_val = tr32(MAC_MI_COM);
674
675                 if ((frame_val & MI_COM_BUSY) == 0) {
676                         udelay(5);
677                         frame_val = tr32(MAC_MI_COM);
678                         break;
679                 }
680                 loops -= 1;
681         }
682
683         ret = -EBUSY;
684         if (loops != 0) {
685                 *val = frame_val & MI_COM_DATA_MASK;
686                 ret = 0;
687         }
688
689         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
690                 tw32_f(MAC_MI_MODE, tp->mi_mode);
691                 udelay(80);
692         }
693
694         return ret;
695 }
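/* Minimal usage sketch (assumed, in the style of calls made elsewhere in the
 * driver): read both PHY identifier registers through the MI interface
 * implemented above.
 *
 *        u32 id1, id2;
 *
 *        if (!tg3_readphy(tp, MII_PHYSID1, &id1) &&
 *            !tg3_readphy(tp, MII_PHYSID2, &id2))
 *                printk(KERN_DEBUG "%s: PHY ID regs %04x %04x\n",
 *                       tp->dev->name, id1, id2);
 */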
696
697 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
698 {
699         u32 frame_val;
700         unsigned int loops;
701         int ret;
702
703         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
704                 tw32_f(MAC_MI_MODE,
705                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
706                 udelay(80);
707         }
708
709         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
710                       MI_COM_PHY_ADDR_MASK);
711         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
712                       MI_COM_REG_ADDR_MASK);
713         frame_val |= (val & MI_COM_DATA_MASK);
714         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
715         
716         tw32_f(MAC_MI_COM, frame_val);
717
718         loops = PHY_BUSY_LOOPS;
719         while (loops != 0) {
720                 udelay(10);
721                 frame_val = tr32(MAC_MI_COM);
722                 if ((frame_val & MI_COM_BUSY) == 0) {
723                         udelay(5);
724                         frame_val = tr32(MAC_MI_COM);
725                         break;
726                 }
727                 loops -= 1;
728         }
729
730         ret = -EBUSY;
731         if (loops != 0)
732                 ret = 0;
733
734         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
735                 tw32_f(MAC_MI_MODE, tp->mi_mode);
736                 udelay(80);
737         }
738
739         return ret;
740 }
741
742 static void tg3_phy_set_wirespeed(struct tg3 *tp)
743 {
744         u32 val;
745
746         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
747                 return;
748
749         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
750             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
751                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
752                              (val | (1 << 15) | (1 << 4)));
753 }
754
755 static int tg3_bmcr_reset(struct tg3 *tp)
756 {
757         u32 phy_control;
758         int limit, err;
759
760         /* OK, reset it, and poll the BMCR_RESET bit until it
761          * clears or we time out.
762          */
763         phy_control = BMCR_RESET;
764         err = tg3_writephy(tp, MII_BMCR, phy_control);
765         if (err != 0)
766                 return -EBUSY;
767
768         limit = 5000;
769         while (limit--) {
770                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
771                 if (err != 0)
772                         return -EBUSY;
773
774                 if ((phy_control & BMCR_RESET) == 0) {
775                         udelay(40);
776                         break;
777                 }
778                 udelay(10);
779         }
780         if (limit <= 0)
781                 return -EBUSY;
782
783         return 0;
784 }
785
786 static int tg3_wait_macro_done(struct tg3 *tp)
787 {
788         int limit = 100;
789
790         while (limit--) {
791                 u32 tmp32;
792
793                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
794                         if ((tmp32 & 0x1000) == 0)
795                                 break;
796                 }
797         }
798         if (limit <= 0)
799                 return -EBUSY;
800
801         return 0;
802 }
803
804 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
805 {
806         static const u32 test_pat[4][6] = {
807         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
808         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
809         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
810         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
811         };
812         int chan;
813
814         for (chan = 0; chan < 4; chan++) {
815                 int i;
816
817                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
818                              (chan * 0x2000) | 0x0200);
819                 tg3_writephy(tp, 0x16, 0x0002);
820
821                 for (i = 0; i < 6; i++)
822                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
823                                      test_pat[chan][i]);
824
825                 tg3_writephy(tp, 0x16, 0x0202);
826                 if (tg3_wait_macro_done(tp)) {
827                         *resetp = 1;
828                         return -EBUSY;
829                 }
830
831                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
832                              (chan * 0x2000) | 0x0200);
833                 tg3_writephy(tp, 0x16, 0x0082);
834                 if (tg3_wait_macro_done(tp)) {
835                         *resetp = 1;
836                         return -EBUSY;
837                 }
838
839                 tg3_writephy(tp, 0x16, 0x0802);
840                 if (tg3_wait_macro_done(tp)) {
841                         *resetp = 1;
842                         return -EBUSY;
843                 }
844
845                 for (i = 0; i < 6; i += 2) {
846                         u32 low, high;
847
848                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
849                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
850                             tg3_wait_macro_done(tp)) {
851                                 *resetp = 1;
852                                 return -EBUSY;
853                         }
854                         low &= 0x7fff;
855                         high &= 0x000f;
856                         if (low != test_pat[chan][i] ||
857                             high != test_pat[chan][i+1]) {
858                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
859                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
860                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
861
862                                 return -EBUSY;
863                         }
864                 }
865         }
866
867         return 0;
868 }
869
870 static int tg3_phy_reset_chanpat(struct tg3 *tp)
871 {
872         int chan;
873
874         for (chan = 0; chan < 4; chan++) {
875                 int i;
876
877                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
878                              (chan * 0x2000) | 0x0200);
879                 tg3_writephy(tp, 0x16, 0x0002);
880                 for (i = 0; i < 6; i++)
881                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
882                 tg3_writephy(tp, 0x16, 0x0202);
883                 if (tg3_wait_macro_done(tp))
884                         return -EBUSY;
885         }
886
887         return 0;
888 }
889
890 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
891 {
892         u32 reg32, phy9_orig;
893         int retries, do_phy_reset, err;
894
895         retries = 10;
896         do_phy_reset = 1;
897         do {
898                 if (do_phy_reset) {
899                         err = tg3_bmcr_reset(tp);
900                         if (err)
901                                 return err;
902                         do_phy_reset = 0;
903                 }
904
905                 /* Disable transmitter and interrupt.  */
906                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
907                         continue;
908
909                 reg32 |= 0x3000;
910                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
911
912                 /* Set full-duplex, 1000 mbps.  */
913                 tg3_writephy(tp, MII_BMCR,
914                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
915
916                 /* Set to master mode.  */
917                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
918                         continue;
919
920                 tg3_writephy(tp, MII_TG3_CTRL,
921                              (MII_TG3_CTRL_AS_MASTER |
922                               MII_TG3_CTRL_ENABLE_AS_MASTER));
923
924                 /* Enable SM_DSP_CLOCK and 6dB.  */
925                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
926
927                 /* Block the PHY control access.  */
928                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
929                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
930
931                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
932                 if (!err)
933                         break;
934         } while (--retries);
935
936         err = tg3_phy_reset_chanpat(tp);
937         if (err)
938                 return err;
939
940         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
941         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
942
943         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
944         tg3_writephy(tp, 0x16, 0x0000);
945
946         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
947             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
948                 /* Set Extended packet length bit for jumbo frames */
949                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
950         }
951         else {
952                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
953         }
954
955         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
956
957         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
958                 reg32 &= ~0x3000;
959                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
960         } else if (!err)
961                 err = -EBUSY;
962
963         return err;
964 }
965
966 /* Reset the tigon3 PHY, then reapply the chip-specific DSP and
967  * AUX_CTRL workarounds that the reset clears.
968  */
969 static int tg3_phy_reset(struct tg3 *tp)
970 {
971         u32 phy_status;
972         int err;
973
974         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
975         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
976         if (err != 0)
977                 return -EBUSY;
978
979         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
980             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
981             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
982                 err = tg3_phy_reset_5703_4_5(tp);
983                 if (err)
984                         return err;
985                 goto out;
986         }
987
988         err = tg3_bmcr_reset(tp);
989         if (err)
990                 return err;
991
992 out:
993         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
994                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
995                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
996                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
997                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
998                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
999                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1000         }
1001         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1002                 tg3_writephy(tp, 0x1c, 0x8d68);
1003                 tg3_writephy(tp, 0x1c, 0x8d68);
1004         }
1005         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1006                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1007                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1008                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1009                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1010                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1011                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1012                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1013                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1014         }
1015         /* Set Extended packet length bit (bit 14) on all chips
1016          * that support jumbo frames. */
1017         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1018                 /* Cannot do read-modify-write on 5401 */
1019                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1020         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1021                 u32 phy_reg;
1022
1023                 /* Set bit 14 with read-modify-write to preserve other bits */
1024                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1025                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1026                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1027         }
1028
1029         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1030          * jumbo frames transmission.
1031          */
1032         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1033                 u32 phy_reg;
1034
1035                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1036                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1037                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1038         }
1039
1040         tg3_phy_set_wirespeed(tp);
1041         return 0;
1042 }
1043
1044 static void tg3_frob_aux_power(struct tg3 *tp)
1045 {
1046         struct tg3 *tp_peer = tp;
1047
1048         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1049                 return;
1050
1051         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1052             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1053                 struct net_device *dev_peer;
1054
1055                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1056                 /* remove_one() may have been run on the peer. */
1057                 if (!dev_peer)
1058                         tp_peer = tp;
1059                 else
1060                         tp_peer = netdev_priv(dev_peer);
1061         }
1062
1063         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1064             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1065             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1066             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1067                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1068                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1069                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1070                                     (GRC_LCLCTRL_GPIO_OE0 |
1071                                      GRC_LCLCTRL_GPIO_OE1 |
1072                                      GRC_LCLCTRL_GPIO_OE2 |
1073                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1074                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1075                                     100);
1076                 } else {
1077                         u32 no_gpio2;
1078                         u32 grc_local_ctrl = 0;
1079
1080                         if (tp_peer != tp &&
1081                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1082                                 return;
1083
1084                         /* Workaround to prevent overdrawing Amps. */
1085                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1086                             ASIC_REV_5714) {
1087                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1088                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1089                                             grc_local_ctrl, 100);
1090                         }
1091
1092                         /* On 5753 and variants, GPIO2 cannot be used. */
1093                         no_gpio2 = tp->nic_sram_data_cfg &
1094                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1095
1096                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1097                                          GRC_LCLCTRL_GPIO_OE1 |
1098                                          GRC_LCLCTRL_GPIO_OE2 |
1099                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1100                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1101                         if (no_gpio2) {
1102                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1103                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1104                         }
1105                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1106                                                     grc_local_ctrl, 100);
1107
1108                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1109
1110                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1111                                                     grc_local_ctrl, 100);
1112
1113                         if (!no_gpio2) {
1114                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1115                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1116                                             grc_local_ctrl, 100);
1117                         }
1118                 }
1119         } else {
1120                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1121                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1122                         if (tp_peer != tp &&
1123                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1124                                 return;
1125
1126                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1127                                     (GRC_LCLCTRL_GPIO_OE1 |
1128                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1129
1130                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1131                                     GRC_LCLCTRL_GPIO_OE1, 100);
1132
1133                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1134                                     (GRC_LCLCTRL_GPIO_OE1 |
1135                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1136                 }
1137         }
1138 }
1139
1140 static int tg3_setup_phy(struct tg3 *, int);
1141
1142 #define RESET_KIND_SHUTDOWN     0
1143 #define RESET_KIND_INIT         1
1144 #define RESET_KIND_SUSPEND      2
1145
1146 static void tg3_write_sig_post_reset(struct tg3 *, int);
1147 static int tg3_halt_cpu(struct tg3 *, u32);
1148 static int tg3_nvram_lock(struct tg3 *);
1149 static void tg3_nvram_unlock(struct tg3 *);
1150
1151 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1152 {
1153         u32 misc_host_ctrl;
1154         u16 power_control, power_caps;
1155         int pm = tp->pm_cap;
1156
1157         /* Make sure register accesses (indirect or otherwise)
1158          * will function correctly.
1159          */
1160         pci_write_config_dword(tp->pdev,
1161                                TG3PCI_MISC_HOST_CTRL,
1162                                tp->misc_host_ctrl);
1163
1164         pci_read_config_word(tp->pdev,
1165                              pm + PCI_PM_CTRL,
1166                              &power_control);
1167         power_control |= PCI_PM_CTRL_PME_STATUS;
1168         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1169         switch (state) {
1170         case PCI_D0:
1171                 power_control |= 0;
1172                 pci_write_config_word(tp->pdev,
1173                                       pm + PCI_PM_CTRL,
1174                                       power_control);
1175                 udelay(100);    /* Delay after power state change */
1176
1177                 /* Switch out of Vaux if it is not a LOM */
1178                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1179                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1180
1181                 return 0;
1182
1183         case PCI_D1:
1184                 power_control |= 1;
1185                 break;
1186
1187         case PCI_D2:
1188                 power_control |= 2;
1189                 break;
1190
1191         case PCI_D3hot:
1192                 power_control |= 3;
1193                 break;
1194
1195         default:
1196                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1197                        "requested.\n",
1198                        tp->dev->name, state);
1199                 return -EINVAL;
1200         }
1201
1202         power_control |= PCI_PM_CTRL_PME_ENABLE;
1203
1204         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1205         tw32(TG3PCI_MISC_HOST_CTRL,
1206              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1207
1208         if (tp->link_config.phy_is_low_power == 0) {
1209                 tp->link_config.phy_is_low_power = 1;
1210                 tp->link_config.orig_speed = tp->link_config.speed;
1211                 tp->link_config.orig_duplex = tp->link_config.duplex;
1212                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1213         }
1214
1215         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1216                 tp->link_config.speed = SPEED_10;
1217                 tp->link_config.duplex = DUPLEX_HALF;
1218                 tp->link_config.autoneg = AUTONEG_ENABLE;
1219                 tg3_setup_phy(tp, 0);
1220         }
1221
1222         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1223                 int i;
1224                 u32 val;
1225
1226                 for (i = 0; i < 200; i++) {
1227                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1228                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1229                                 break;
1230                         msleep(1);
1231                 }
1232         }
1233         tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1234                                              WOL_DRV_STATE_SHUTDOWN |
1235                                              WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1236
1237         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1238
1239         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1240                 u32 mac_mode;
1241
1242                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1243                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1244                         udelay(40);
1245
1246                         mac_mode = MAC_MODE_PORT_MODE_MII;
1247
1248                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1249                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1250                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1251                 } else {
1252                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1253                 }
1254
1255                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1256                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1257
1258                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1259                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1260                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1261
1262                 tw32_f(MAC_MODE, mac_mode);
1263                 udelay(100);
1264
1265                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1266                 udelay(10);
1267         }
1268
1269         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1270             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1271              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1272                 u32 base_val;
1273
1274                 base_val = tp->pci_clock_ctrl;
1275                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1276                              CLOCK_CTRL_TXCLK_DISABLE);
1277
1278                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1279                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1280         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1281                 /* do nothing */
1282         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1283                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1284                 u32 newbits1, newbits2;
1285
1286                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1287                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1288                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1289                                     CLOCK_CTRL_TXCLK_DISABLE |
1290                                     CLOCK_CTRL_ALTCLK);
1291                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1292                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1293                         newbits1 = CLOCK_CTRL_625_CORE;
1294                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1295                 } else {
1296                         newbits1 = CLOCK_CTRL_ALTCLK;
1297                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1298                 }
1299
1300                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1301                             40);
1302
1303                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1304                             40);
1305
1306                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1307                         u32 newbits3;
1308
1309                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1310                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1311                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1312                                             CLOCK_CTRL_TXCLK_DISABLE |
1313                                             CLOCK_CTRL_44MHZ_CORE);
1314                         } else {
1315                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1316                         }
1317
1318                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1319                                     tp->pci_clock_ctrl | newbits3, 40);
1320                 }
1321         }
1322
1323         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1324             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1325                 /* Turn off the PHY */
1326                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1327                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1328                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1329                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1330                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
1331                                 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1332                 }
1333         }
1334
1335         tg3_frob_aux_power(tp);
1336
1337         /* Workaround for unstable PLL clock */
1338         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1339             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1340                 u32 val = tr32(0x7d00);
1341
1342                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1343                 tw32(0x7d00, val);
1344                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1345                         int err;
1346
1347                         err = tg3_nvram_lock(tp);
1348                         tg3_halt_cpu(tp, RX_CPU_BASE);
1349                         if (!err)
1350                                 tg3_nvram_unlock(tp);
1351                 }
1352         }
1353
1354         /* Finally, set the new power state. */
1355         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1356         udelay(100);    /* Delay after power state change */
1357
1358         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1359
1360         return 0;
1361 }
1362
1363 static void tg3_link_report(struct tg3 *tp)
1364 {
1365         if (!netif_carrier_ok(tp->dev)) {
1366                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1367         } else {
1368                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1369                        tp->dev->name,
1370                        (tp->link_config.active_speed == SPEED_1000 ?
1371                         1000 :
1372                         (tp->link_config.active_speed == SPEED_100 ?
1373                          100 : 10)),
1374                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1375                         "full" : "half"));
1376
1377                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1378                        "%s for RX.\n",
1379                        tp->dev->name,
1380                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1381                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1382         }
1383 }
1384
1385 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1386 {
1387         u32 new_tg3_flags = 0;
1388         u32 old_rx_mode = tp->rx_mode;
1389         u32 old_tx_mode = tp->tx_mode;
1390
1391         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1392
1393                 /* Convert 1000BaseX flow control bits to 1000BaseT
1394                  * bits before resolving flow control.
1395                  */
1396                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1397                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1398                                        ADVERTISE_PAUSE_ASYM);
1399                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1400
1401                         if (local_adv & ADVERTISE_1000XPAUSE)
1402                                 local_adv |= ADVERTISE_PAUSE_CAP;
1403                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1404                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1405                         if (remote_adv & LPA_1000XPAUSE)
1406                                 remote_adv |= LPA_PAUSE_CAP;
1407                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1408                                 remote_adv |= LPA_PAUSE_ASYM;
1409                 }
1410
1411                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1412                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1413                                 if (remote_adv & LPA_PAUSE_CAP)
1414                                         new_tg3_flags |=
1415                                                 (TG3_FLAG_RX_PAUSE |
1416                                                 TG3_FLAG_TX_PAUSE);
1417                                 else if (remote_adv & LPA_PAUSE_ASYM)
1418                                         new_tg3_flags |=
1419                                                 (TG3_FLAG_RX_PAUSE);
1420                         } else {
1421                                 if (remote_adv & LPA_PAUSE_CAP)
1422                                         new_tg3_flags |=
1423                                                 (TG3_FLAG_RX_PAUSE |
1424                                                 TG3_FLAG_TX_PAUSE);
1425                         }
1426                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1427                         if ((remote_adv & LPA_PAUSE_CAP) &&
1428                         (remote_adv & LPA_PAUSE_ASYM))
1429                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1430                 }
1431
1432                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1433                 tp->tg3_flags |= new_tg3_flags;
1434         } else {
1435                 new_tg3_flags = tp->tg3_flags;
1436         }
1437
1438         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1439                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1440         else
1441                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1442
1443         if (old_rx_mode != tp->rx_mode) {
1444                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1445         }
1446
1447         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1448                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1449         else
1450                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1451
1452         if (old_tx_mode != tp->tx_mode) {
1453                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1454         }
1455 }
1456
1457 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1458 {
1459         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1460         case MII_TG3_AUX_STAT_10HALF:
1461                 *speed = SPEED_10;
1462                 *duplex = DUPLEX_HALF;
1463                 break;
1464
1465         case MII_TG3_AUX_STAT_10FULL:
1466                 *speed = SPEED_10;
1467                 *duplex = DUPLEX_FULL;
1468                 break;
1469
1470         case MII_TG3_AUX_STAT_100HALF:
1471                 *speed = SPEED_100;
1472                 *duplex = DUPLEX_HALF;
1473                 break;
1474
1475         case MII_TG3_AUX_STAT_100FULL:
1476                 *speed = SPEED_100;
1477                 *duplex = DUPLEX_FULL;
1478                 break;
1479
1480         case MII_TG3_AUX_STAT_1000HALF:
1481                 *speed = SPEED_1000;
1482                 *duplex = DUPLEX_HALF;
1483                 break;
1484
1485         case MII_TG3_AUX_STAT_1000FULL:
1486                 *speed = SPEED_1000;
1487                 *duplex = DUPLEX_FULL;
1488                 break;
1489
1490         default:
1491                 *speed = SPEED_INVALID;
1492                 *duplex = DUPLEX_INVALID;
1493                 break;
1494         }
1495 }
1496
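     /* Program the copper PHY advertisement registers from tp->link_config,
      * then either force BMCR to the requested speed/duplex (autoneg
      * disabled) or (re)start autonegotiation.
      */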
1497 static void tg3_phy_copper_begin(struct tg3 *tp)
1498 {
1499         u32 new_adv;
1500         int i;
1501
1502         if (tp->link_config.phy_is_low_power) {
1503                 /* Entering low power mode.  Disable gigabit and
1504                  * 100baseT advertisements.
1505                  */
1506                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1507
1508                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1509                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1510                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1511                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1512
1513                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1514         } else if (tp->link_config.speed == SPEED_INVALID) {
1515                 tp->link_config.advertising =
1516                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1517                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1518                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1519                          ADVERTISED_Autoneg | ADVERTISED_MII);
1520
1521                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1522                         tp->link_config.advertising &=
1523                                 ~(ADVERTISED_1000baseT_Half |
1524                                   ADVERTISED_1000baseT_Full);
1525
1526                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1527                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1528                         new_adv |= ADVERTISE_10HALF;
1529                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1530                         new_adv |= ADVERTISE_10FULL;
1531                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1532                         new_adv |= ADVERTISE_100HALF;
1533                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1534                         new_adv |= ADVERTISE_100FULL;
1535                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1536
1537                 if (tp->link_config.advertising &
1538                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1539                         new_adv = 0;
1540                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1541                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1542                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1543                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1544                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1545                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1546                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1547                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1548                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1549                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1550                 } else {
1551                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1552                 }
1553         } else {
1554                 /* Asking for a specific link mode. */
1555                 if (tp->link_config.speed == SPEED_1000) {
1556                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1557                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1558
1559                         if (tp->link_config.duplex == DUPLEX_FULL)
1560                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1561                         else
1562                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1563                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1564                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1565                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1566                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1567                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1568                 } else {
1569                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1570
1571                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1572                         if (tp->link_config.speed == SPEED_100) {
1573                                 if (tp->link_config.duplex == DUPLEX_FULL)
1574                                         new_adv |= ADVERTISE_100FULL;
1575                                 else
1576                                         new_adv |= ADVERTISE_100HALF;
1577                         } else {
1578                                 if (tp->link_config.duplex == DUPLEX_FULL)
1579                                         new_adv |= ADVERTISE_10FULL;
1580                                 else
1581                                         new_adv |= ADVERTISE_10HALF;
1582                         }
1583                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1584                 }
1585         }
1586
1587         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1588             tp->link_config.speed != SPEED_INVALID) {
1589                 u32 bmcr, orig_bmcr;
1590
1591                 tp->link_config.active_speed = tp->link_config.speed;
1592                 tp->link_config.active_duplex = tp->link_config.duplex;
1593
1594                 bmcr = 0;
1595                 switch (tp->link_config.speed) {
1596                 default:
1597                 case SPEED_10:
1598                         break;
1599
1600                 case SPEED_100:
1601                         bmcr |= BMCR_SPEED100;
1602                         break;
1603
1604                 case SPEED_1000:
1605                         bmcr |= TG3_BMCR_SPEED1000;
1606                         break;
1607                 }
1608
1609                 if (tp->link_config.duplex == DUPLEX_FULL)
1610                         bmcr |= BMCR_FULLDPLX;
1611
1612                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1613                     (bmcr != orig_bmcr)) {
1614                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1615                         for (i = 0; i < 1500; i++) {
1616                                 u32 tmp;
1617
1618                                 udelay(10);
1619                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1620                                     tg3_readphy(tp, MII_BMSR, &tmp))
1621                                         continue;
1622                                 if (!(tmp & BMSR_LSTATUS)) {
1623                                         udelay(40);
1624                                         break;
1625                                 }
1626                         }
1627                         tg3_writephy(tp, MII_BMCR, bmcr);
1628                         udelay(40);
1629                 }
1630         } else {
1631                 tg3_writephy(tp, MII_BMCR,
1632                              BMCR_ANENABLE | BMCR_ANRESTART);
1633         }
1634 }
1635
1636 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1637 {
1638         int err;
1639
1640         /* Turn off tap power management. */
1641         /* Set Extended packet length bit */
1642         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1643
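             /* DSP registers are accessed indirectly: each pair below selects
              * a register via MII_TG3_DSP_ADDRESS and then writes its value
              * through MII_TG3_DSP_RW_PORT.
              */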
1644         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1645         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1646
1647         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1648         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1649
1650         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1651         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1652
1653         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1654         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1655
1656         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1657         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1658
1659         udelay(40);
1660
1661         return err;
1662 }
1663
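     /* Return 1 only if the PHY is currently advertising every 10/100
      * mode, plus both 1000BASE-T modes unless the chip is restricted to
      * 10/100 operation.
      */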
1664 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1665 {
1666         u32 adv_reg, all_mask;
1667
1668         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1669                 return 0;
1670
1671         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1672                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1673         if ((adv_reg & all_mask) != all_mask)
1674                 return 0;
1675         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1676                 u32 tg3_ctrl;
1677
1678                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1679                         return 0;
1680
1681                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1682                             MII_TG3_CTRL_ADV_1000_FULL);
1683                 if ((tg3_ctrl & all_mask) != all_mask)
1684                         return 0;
1685         }
1686         return 1;
1687 }
1688
1689 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1690 {
1691         int current_link_up;
1692         u32 bmsr, dummy;
1693         u16 current_speed;
1694         u8 current_duplex;
1695         int i, err;
1696
1697         tw32(MAC_EVENT, 0);
1698
1699         tw32_f(MAC_STATUS,
1700              (MAC_STATUS_SYNC_CHANGED |
1701               MAC_STATUS_CFG_CHANGED |
1702               MAC_STATUS_MI_COMPLETION |
1703               MAC_STATUS_LNKSTATE_CHANGED));
1704         udelay(40);
1705
1706         tp->mi_mode = MAC_MI_MODE_BASE;
1707         tw32_f(MAC_MI_MODE, tp->mi_mode);
1708         udelay(80);
1709
1710         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1711
1712         /* Some third-party PHYs need to be reset on link going
1713          * down.
1714          */
1715         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1716              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1717              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1718             netif_carrier_ok(tp->dev)) {
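                     /* BMSR latches link-down events, so it is read twice:
                      * the first read returns (and clears) any stale latched
                      * state, the second reflects the current link status.
                      */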
1719                 tg3_readphy(tp, MII_BMSR, &bmsr);
1720                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1721                     !(bmsr & BMSR_LSTATUS))
1722                         force_reset = 1;
1723         }
1724         if (force_reset)
1725                 tg3_phy_reset(tp);
1726
1727         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1728                 tg3_readphy(tp, MII_BMSR, &bmsr);
1729                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1730                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1731                         bmsr = 0;
1732
1733                 if (!(bmsr & BMSR_LSTATUS)) {
1734                         err = tg3_init_5401phy_dsp(tp);
1735                         if (err)
1736                                 return err;
1737
1738                         tg3_readphy(tp, MII_BMSR, &bmsr);
1739                         for (i = 0; i < 1000; i++) {
1740                                 udelay(10);
1741                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1742                                     (bmsr & BMSR_LSTATUS)) {
1743                                         udelay(40);
1744                                         break;
1745                                 }
1746                         }
1747
1748                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1749                             !(bmsr & BMSR_LSTATUS) &&
1750                             tp->link_config.active_speed == SPEED_1000) {
1751                                 err = tg3_phy_reset(tp);
1752                                 if (!err)
1753                                         err = tg3_init_5401phy_dsp(tp);
1754                                 if (err)
1755                                         return err;
1756                         }
1757                 }
1758         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1759                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1760                 /* 5701 {A0,B0} CRC bug workaround */
1761                 tg3_writephy(tp, 0x15, 0x0a75);
1762                 tg3_writephy(tp, 0x1c, 0x8c68);
1763                 tg3_writephy(tp, 0x1c, 0x8d68);
1764                 tg3_writephy(tp, 0x1c, 0x8c68);
1765         }
1766
1767         /* Clear pending interrupts... */
1768         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1769         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1770
1771         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1772                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1773         else
1774                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1775
1776         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1777             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1778                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1779                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1780                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1781                 else
1782                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1783         }
1784
1785         current_link_up = 0;
1786         current_speed = SPEED_INVALID;
1787         current_duplex = DUPLEX_INVALID;
1788
1789         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1790                 u32 val;
1791
1792                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1793                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1794                 if (!(val & (1 << 10))) {
1795                         val |= (1 << 10);
1796                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1797                         goto relink;
1798                 }
1799         }
1800
1801         bmsr = 0;
1802         for (i = 0; i < 100; i++) {
1803                 tg3_readphy(tp, MII_BMSR, &bmsr);
1804                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1805                     (bmsr & BMSR_LSTATUS))
1806                         break;
1807                 udelay(40);
1808         }
1809
1810         if (bmsr & BMSR_LSTATUS) {
1811                 u32 aux_stat, bmcr;
1812
1813                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1814                 for (i = 0; i < 2000; i++) {
1815                         udelay(10);
1816                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1817                             aux_stat)
1818                                 break;
1819                 }
1820
1821                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1822                                              &current_speed,
1823                                              &current_duplex);
1824
1825                 bmcr = 0;
1826                 for (i = 0; i < 200; i++) {
1827                         tg3_readphy(tp, MII_BMCR, &bmcr);
1828                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1829                                 continue;
1830                         if (bmcr && bmcr != 0x7fff)
1831                                 break;
1832                         udelay(10);
1833                 }
1834
1835                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1836                         if (bmcr & BMCR_ANENABLE) {
1837                                 current_link_up = 1;
1838
1839                                 /* Force autoneg restart if we are exiting
1840                                  * low power mode.
1841                                  */
1842                                 if (!tg3_copper_is_advertising_all(tp))
1843                                         current_link_up = 0;
1844                         } else {
1845                                 current_link_up = 0;
1846                         }
1847                 } else {
1848                         if (!(bmcr & BMCR_ANENABLE) &&
1849                             tp->link_config.speed == current_speed &&
1850                             tp->link_config.duplex == current_duplex) {
1851                                 current_link_up = 1;
1852                         } else {
1853                                 current_link_up = 0;
1854                         }
1855                 }
1856
1857                 tp->link_config.active_speed = current_speed;
1858                 tp->link_config.active_duplex = current_duplex;
1859         }
1860
1861         if (current_link_up == 1 &&
1862             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1863             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1864                 u32 local_adv, remote_adv;
1865
1866                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1867                         local_adv = 0;
1868                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1869
1870                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1871                         remote_adv = 0;
1872
1873                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1874
1875                 /* If we are not advertising full pause capability,
1876                  * something is wrong.  Bring the link down and reconfigure.
1877                  */
1878                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1879                         current_link_up = 0;
1880                 } else {
1881                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1882                 }
1883         }
1884 relink:
1885         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1886                 u32 tmp;
1887
1888                 tg3_phy_copper_begin(tp);
1889
1890                 tg3_readphy(tp, MII_BMSR, &tmp);
1891                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1892                     (tmp & BMSR_LSTATUS))
1893                         current_link_up = 1;
1894         }
1895
1896         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1897         if (current_link_up == 1) {
1898                 if (tp->link_config.active_speed == SPEED_100 ||
1899                     tp->link_config.active_speed == SPEED_10)
1900                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1901                 else
1902                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1903         } else
1904                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1905
1906         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1907         if (tp->link_config.active_duplex == DUPLEX_HALF)
1908                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1909
1910         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1911         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1912                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1913                     (current_link_up == 1 &&
1914                      tp->link_config.active_speed == SPEED_10))
1915                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1916         } else {
1917                 if (current_link_up == 1)
1918                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1919         }
1920
1921         /* ??? Without this setting Netgear GA302T PHY does not
1922          * ??? send/receive packets...
1923          */
1924         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1925             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1926                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1927                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1928                 udelay(80);
1929         }
1930
1931         tw32_f(MAC_MODE, tp->mac_mode);
1932         udelay(40);
1933
1934         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1935                 /* Polled via timer. */
1936                 tw32_f(MAC_EVENT, 0);
1937         } else {
1938                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1939         }
1940         udelay(40);
1941
1942         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1943             current_link_up == 1 &&
1944             tp->link_config.active_speed == SPEED_1000 &&
1945             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1946              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1947                 udelay(120);
1948                 tw32_f(MAC_STATUS,
1949                      (MAC_STATUS_SYNC_CHANGED |
1950                       MAC_STATUS_CFG_CHANGED));
1951                 udelay(40);
1952                 tg3_write_mem(tp,
1953                               NIC_SRAM_FIRMWARE_MBOX,
1954                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1955         }
1956
1957         if (current_link_up != netif_carrier_ok(tp->dev)) {
1958                 if (current_link_up)
1959                         netif_carrier_on(tp->dev);
1960                 else
1961                         netif_carrier_off(tp->dev);
1962                 tg3_link_report(tp);
1963         }
1964
1965         return 0;
1966 }
1967
1968 struct tg3_fiber_aneginfo {
1969         int state;
1970 #define ANEG_STATE_UNKNOWN              0
1971 #define ANEG_STATE_AN_ENABLE            1
1972 #define ANEG_STATE_RESTART_INIT         2
1973 #define ANEG_STATE_RESTART              3
1974 #define ANEG_STATE_DISABLE_LINK_OK      4
1975 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1976 #define ANEG_STATE_ABILITY_DETECT       6
1977 #define ANEG_STATE_ACK_DETECT_INIT      7
1978 #define ANEG_STATE_ACK_DETECT           8
1979 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1980 #define ANEG_STATE_COMPLETE_ACK         10
1981 #define ANEG_STATE_IDLE_DETECT_INIT     11
1982 #define ANEG_STATE_IDLE_DETECT          12
1983 #define ANEG_STATE_LINK_OK              13
1984 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1985 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1986
1987         u32 flags;
1988 #define MR_AN_ENABLE            0x00000001
1989 #define MR_RESTART_AN           0x00000002
1990 #define MR_AN_COMPLETE          0x00000004
1991 #define MR_PAGE_RX              0x00000008
1992 #define MR_NP_LOADED            0x00000010
1993 #define MR_TOGGLE_TX            0x00000020
1994 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1995 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1996 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1997 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1998 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1999 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2000 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2001 #define MR_TOGGLE_RX            0x00002000
2002 #define MR_NP_RX                0x00004000
2003
2004 #define MR_LINK_OK              0x80000000
2005
2006         unsigned long link_time, cur_time;
2007
2008         u32 ability_match_cfg;
2009         int ability_match_count;
2010
2011         char ability_match, idle_match, ack_match;
2012
2013         u32 txconfig, rxconfig;
2014 #define ANEG_CFG_NP             0x00000080
2015 #define ANEG_CFG_ACK            0x00000040
2016 #define ANEG_CFG_RF2            0x00000020
2017 #define ANEG_CFG_RF1            0x00000010
2018 #define ANEG_CFG_PS2            0x00000001
2019 #define ANEG_CFG_PS1            0x00008000
2020 #define ANEG_CFG_HD             0x00004000
2021 #define ANEG_CFG_FD             0x00002000
2022 #define ANEG_CFG_INVAL          0x00001f06
2023
2024 };
2025 #define ANEG_OK         0
2026 #define ANEG_DONE       1
2027 #define ANEG_TIMER_ENAB 2
2028 #define ANEG_FAILED     -1
2029
2030 #define ANEG_STATE_SETTLE_TIME  10000
2031
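     /* Software 1000BASE-X autonegotiation state machine, driven by the raw
      * config words in MAC_TX_AUTO_NEG/MAC_RX_AUTO_NEG.  The states mirror
      * the arbitration state diagram of IEEE 802.3 clause 37; fiber_autoneg()
      * calls this repeatedly until it returns ANEG_DONE or ANEG_FAILED.
      */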
2032 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2033                                    struct tg3_fiber_aneginfo *ap)
2034 {
2035         unsigned long delta;
2036         u32 rx_cfg_reg;
2037         int ret;
2038
2039         if (ap->state == ANEG_STATE_UNKNOWN) {
2040                 ap->rxconfig = 0;
2041                 ap->link_time = 0;
2042                 ap->cur_time = 0;
2043                 ap->ability_match_cfg = 0;
2044                 ap->ability_match_count = 0;
2045                 ap->ability_match = 0;
2046                 ap->idle_match = 0;
2047                 ap->ack_match = 0;
2048         }
2049         ap->cur_time++;
2050
2051         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2052                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2053
2054                 if (rx_cfg_reg != ap->ability_match_cfg) {
2055                         ap->ability_match_cfg = rx_cfg_reg;
2056                         ap->ability_match = 0;
2057                         ap->ability_match_count = 0;
2058                 } else {
2059                         if (++ap->ability_match_count > 1) {
2060                                 ap->ability_match = 1;
2061                                 ap->ability_match_cfg = rx_cfg_reg;
2062                         }
2063                 }
2064                 if (rx_cfg_reg & ANEG_CFG_ACK)
2065                         ap->ack_match = 1;
2066                 else
2067                         ap->ack_match = 0;
2068
2069                 ap->idle_match = 0;
2070         } else {
2071                 ap->idle_match = 1;
2072                 ap->ability_match_cfg = 0;
2073                 ap->ability_match_count = 0;
2074                 ap->ability_match = 0;
2075                 ap->ack_match = 0;
2076
2077                 rx_cfg_reg = 0;
2078         }
2079
2080         ap->rxconfig = rx_cfg_reg;
2081         ret = ANEG_OK;
2082
2083         switch (ap->state) {
2084         case ANEG_STATE_UNKNOWN:
2085                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2086                         ap->state = ANEG_STATE_AN_ENABLE;
2087
2088                 /* fallthru */
2089         case ANEG_STATE_AN_ENABLE:
2090                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2091                 if (ap->flags & MR_AN_ENABLE) {
2092                         ap->link_time = 0;
2093                         ap->cur_time = 0;
2094                         ap->ability_match_cfg = 0;
2095                         ap->ability_match_count = 0;
2096                         ap->ability_match = 0;
2097                         ap->idle_match = 0;
2098                         ap->ack_match = 0;
2099
2100                         ap->state = ANEG_STATE_RESTART_INIT;
2101                 } else {
2102                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2103                 }
2104                 break;
2105
2106         case ANEG_STATE_RESTART_INIT:
2107                 ap->link_time = ap->cur_time;
2108                 ap->flags &= ~(MR_NP_LOADED);
2109                 ap->txconfig = 0;
2110                 tw32(MAC_TX_AUTO_NEG, 0);
2111                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2112                 tw32_f(MAC_MODE, tp->mac_mode);
2113                 udelay(40);
2114
2115                 ret = ANEG_TIMER_ENAB;
2116                 ap->state = ANEG_STATE_RESTART;
2117
2118                 /* fallthru */
2119         case ANEG_STATE_RESTART:
2120                 delta = ap->cur_time - ap->link_time;
2121                 if (delta > ANEG_STATE_SETTLE_TIME) {
2122                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2123                 } else {
2124                         ret = ANEG_TIMER_ENAB;
2125                 }
2126                 break;
2127
2128         case ANEG_STATE_DISABLE_LINK_OK:
2129                 ret = ANEG_DONE;
2130                 break;
2131
2132         case ANEG_STATE_ABILITY_DETECT_INIT:
2133                 ap->flags &= ~(MR_TOGGLE_TX);
2134                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2135                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2136                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2137                 tw32_f(MAC_MODE, tp->mac_mode);
2138                 udelay(40);
2139
2140                 ap->state = ANEG_STATE_ABILITY_DETECT;
2141                 break;
2142
2143         case ANEG_STATE_ABILITY_DETECT:
2144                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2145                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2146                 }
2147                 break;
2148
2149         case ANEG_STATE_ACK_DETECT_INIT:
2150                 ap->txconfig |= ANEG_CFG_ACK;
2151                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2152                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2153                 tw32_f(MAC_MODE, tp->mac_mode);
2154                 udelay(40);
2155
2156                 ap->state = ANEG_STATE_ACK_DETECT;
2157
2158                 /* fallthru */
2159         case ANEG_STATE_ACK_DETECT:
2160                 if (ap->ack_match != 0) {
2161                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2162                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2163                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2164                         } else {
2165                                 ap->state = ANEG_STATE_AN_ENABLE;
2166                         }
2167                 } else if (ap->ability_match != 0 &&
2168                            ap->rxconfig == 0) {
2169                         ap->state = ANEG_STATE_AN_ENABLE;
2170                 }
2171                 break;
2172
2173         case ANEG_STATE_COMPLETE_ACK_INIT:
2174                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2175                         ret = ANEG_FAILED;
2176                         break;
2177                 }
2178                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2179                                MR_LP_ADV_HALF_DUPLEX |
2180                                MR_LP_ADV_SYM_PAUSE |
2181                                MR_LP_ADV_ASYM_PAUSE |
2182                                MR_LP_ADV_REMOTE_FAULT1 |
2183                                MR_LP_ADV_REMOTE_FAULT2 |
2184                                MR_LP_ADV_NEXT_PAGE |
2185                                MR_TOGGLE_RX |
2186                                MR_NP_RX);
2187                 if (ap->rxconfig & ANEG_CFG_FD)
2188                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2189                 if (ap->rxconfig & ANEG_CFG_HD)
2190                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2191                 if (ap->rxconfig & ANEG_CFG_PS1)
2192                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2193                 if (ap->rxconfig & ANEG_CFG_PS2)
2194                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2195                 if (ap->rxconfig & ANEG_CFG_RF1)
2196                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2197                 if (ap->rxconfig & ANEG_CFG_RF2)
2198                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2199                 if (ap->rxconfig & ANEG_CFG_NP)
2200                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2201
2202                 ap->link_time = ap->cur_time;
2203
2204                 ap->flags ^= (MR_TOGGLE_TX);
2205                 if (ap->rxconfig & 0x0008)
2206                         ap->flags |= MR_TOGGLE_RX;
2207                 if (ap->rxconfig & ANEG_CFG_NP)
2208                         ap->flags |= MR_NP_RX;
2209                 ap->flags |= MR_PAGE_RX;
2210
2211                 ap->state = ANEG_STATE_COMPLETE_ACK;
2212                 ret = ANEG_TIMER_ENAB;
2213                 break;
2214
2215         case ANEG_STATE_COMPLETE_ACK:
2216                 if (ap->ability_match != 0 &&
2217                     ap->rxconfig == 0) {
2218                         ap->state = ANEG_STATE_AN_ENABLE;
2219                         break;
2220                 }
2221                 delta = ap->cur_time - ap->link_time;
2222                 if (delta > ANEG_STATE_SETTLE_TIME) {
2223                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2224                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2225                         } else {
2226                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2227                                     !(ap->flags & MR_NP_RX)) {
2228                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2229                                 } else {
2230                                         ret = ANEG_FAILED;
2231                                 }
2232                         }
2233                 }
2234                 break;
2235
2236         case ANEG_STATE_IDLE_DETECT_INIT:
2237                 ap->link_time = ap->cur_time;
2238                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2239                 tw32_f(MAC_MODE, tp->mac_mode);
2240                 udelay(40);
2241
2242                 ap->state = ANEG_STATE_IDLE_DETECT;
2243                 ret = ANEG_TIMER_ENAB;
2244                 break;
2245
2246         case ANEG_STATE_IDLE_DETECT:
2247                 if (ap->ability_match != 0 &&
2248                     ap->rxconfig == 0) {
2249                         ap->state = ANEG_STATE_AN_ENABLE;
2250                         break;
2251                 }
2252                 delta = ap->cur_time - ap->link_time;
2253                 if (delta > ANEG_STATE_SETTLE_TIME) {
2254                         /* XXX another gem from the Broadcom driver :( */
2255                         ap->state = ANEG_STATE_LINK_OK;
2256                 }
2257                 break;
2258
2259         case ANEG_STATE_LINK_OK:
2260                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2261                 ret = ANEG_DONE;
2262                 break;
2263
2264         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2265                 /* ??? unimplemented */
2266                 break;
2267
2268         case ANEG_STATE_NEXT_PAGE_WAIT:
2269                 /* ??? unimplemented */
2270                 break;
2271
2272         default:
2273                 ret = ANEG_FAILED;
2274                 break;
2275         }
2276
2277         return ret;
2278 }
2279
2280 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2281 {
2282         int res = 0;
2283         struct tg3_fiber_aneginfo aninfo;
2284         int status = ANEG_FAILED;
2285         unsigned int tick;
2286         u32 tmp;
2287
2288         tw32_f(MAC_TX_AUTO_NEG, 0);
2289
2290         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2291         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2292         udelay(40);
2293
2294         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2295         udelay(40);
2296
2297         memset(&aninfo, 0, sizeof(aninfo));
2298         aninfo.flags |= MR_AN_ENABLE;
2299         aninfo.state = ANEG_STATE_UNKNOWN;
2300         aninfo.cur_time = 0;
2301         tick = 0;
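             /* Drive the state machine for up to 195000 passes with a 1 us
              * delay between them, giving autonegotiation roughly 200 ms to
              * complete before we give up.
              */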
2302         while (++tick < 195000) {
2303                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2304                 if (status == ANEG_DONE || status == ANEG_FAILED)
2305                         break;
2306
2307                 udelay(1);
2308         }
2309
2310         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2311         tw32_f(MAC_MODE, tp->mac_mode);
2312         udelay(40);
2313
2314         *flags = aninfo.flags;
2315
2316         if (status == ANEG_DONE &&
2317             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2318                              MR_LP_ADV_FULL_DUPLEX)))
2319                 res = 1;
2320
2321         return res;
2322 }
2323
2324 static void tg3_init_bcm8002(struct tg3 *tp)
2325 {
2326         u32 mac_status = tr32(MAC_STATUS);
2327         int i;
2328
2329         /* Reset only when initializing for the first time or when we have a link. */
2330         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2331             !(mac_status & MAC_STATUS_PCS_SYNCED))
2332                 return;
2333
2334         /* Set PLL lock range. */
2335         tg3_writephy(tp, 0x16, 0x8007);
2336
2337         /* SW reset */
2338         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2339
2340         /* Wait for reset to complete. */
2341         /* XXX schedule_timeout() ... */
2342         for (i = 0; i < 500; i++)
2343                 udelay(10);
2344
2345         /* Config mode; select PMA/Ch 1 regs. */
2346         tg3_writephy(tp, 0x10, 0x8411);
2347
2348         /* Enable auto-lock and comdet, select txclk for tx. */
2349         tg3_writephy(tp, 0x11, 0x0a10);
2350
2351         tg3_writephy(tp, 0x18, 0x00a0);
2352         tg3_writephy(tp, 0x16, 0x41ff);
2353
2354         /* Assert and deassert POR. */
2355         tg3_writephy(tp, 0x13, 0x0400);
2356         udelay(40);
2357         tg3_writephy(tp, 0x13, 0x0000);
2358
2359         tg3_writephy(tp, 0x11, 0x0a50);
2360         udelay(40);
2361         tg3_writephy(tp, 0x11, 0x0a10);
2362
2363         /* Wait for signal to stabilize */
2364         /* XXX schedule_timeout() ... */
2365         for (i = 0; i < 15000; i++)
2366                 udelay(10);
2367
2368         /* Deselect the channel register so we can read the PHYID
2369          * later.
2370          */
2371         tg3_writephy(tp, 0x10, 0x8011);
2372 }
2373
2374 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2375 {
2376         u32 sg_dig_ctrl, sg_dig_status;
2377         u32 serdes_cfg, expected_sg_dig_ctrl;
2378         int workaround, port_a;
2379         int current_link_up;
2380
2381         serdes_cfg = 0;
2382         expected_sg_dig_ctrl = 0;
2383         workaround = 0;
2384         port_a = 1;
2385         current_link_up = 0;
2386
2387         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2388             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2389                 workaround = 1;
2390                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2391                         port_a = 0;
2392
2393                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2394                 /* preserve bits 20-23 for voltage regulator */
2395                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2396         }
2397
2398         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2399
2400         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2401                 if (sg_dig_ctrl & (1 << 31)) {
2402                         if (workaround) {
2403                                 u32 val = serdes_cfg;
2404
2405                                 if (port_a)
2406                                         val |= 0xc010000;
2407                                 else
2408                                         val |= 0x4010000;
2409                                 tw32_f(MAC_SERDES_CFG, val);
2410                         }
2411                         tw32_f(SG_DIG_CTRL, 0x01388400);
2412                 }
2413                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2414                         tg3_setup_flow_control(tp, 0, 0);
2415                         current_link_up = 1;
2416                 }
2417                 goto out;
2418         }
2419
2420         /* Want auto-negotiation.  */
2421         expected_sg_dig_ctrl = 0x81388400;
2422
2423         /* Pause capability */
2424         expected_sg_dig_ctrl |= (1 << 11);
2425
2426         /* Asymmetric pause */
2427         expected_sg_dig_ctrl |= (1 << 12);
2428
2429         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2430                 if (workaround)
2431                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2432                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2433                 udelay(5);
2434                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2435
2436                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2437         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2438                                  MAC_STATUS_SIGNAL_DET)) {
2439                 int i;
2440
2441                 /* Give it time to negotiate (~200ms) */
2442                 for (i = 0; i < 40000; i++) {
2443                         sg_dig_status = tr32(SG_DIG_STATUS);
2444                         if (sg_dig_status & (0x3))
2445                                 break;
2446                         udelay(5);
2447                 }
2448                 mac_status = tr32(MAC_STATUS);
2449
2450                 if ((sg_dig_status & (1 << 1)) &&
2451                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2452                         u32 local_adv, remote_adv;
2453
2454                         local_adv = ADVERTISE_PAUSE_CAP;
2455                         remote_adv = 0;
2456                         if (sg_dig_status & (1 << 19))
2457                                 remote_adv |= LPA_PAUSE_CAP;
2458                         if (sg_dig_status & (1 << 20))
2459                                 remote_adv |= LPA_PAUSE_ASYM;
2460
2461                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2462                         current_link_up = 1;
2463                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2464                 } else if (!(sg_dig_status & (1 << 1))) {
2465                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2466                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2467                         else {
2468                                 if (workaround) {
2469                                         u32 val = serdes_cfg;
2470
2471                                         if (port_a)
2472                                                 val |= 0xc010000;
2473                                         else
2474                                                 val |= 0x4010000;
2475
2476                                         tw32_f(MAC_SERDES_CFG, val);
2477                                 }
2478
2479                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2480                                 udelay(40);
2481
2482                                 /* Link parallel detection: the link is up
2483                                  * only if we have PCS_SYNC and are not
2484                                  * receiving config code words. */
2485                                 mac_status = tr32(MAC_STATUS);
2486                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2487                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2488                                         tg3_setup_flow_control(tp, 0, 0);
2489                                         current_link_up = 1;
2490                                 }
2491                         }
2492                 }
2493         }
2494
2495 out:
2496         return current_link_up;
2497 }
2498
2499 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2500 {
2501         int current_link_up = 0;
2502
2503         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2504                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2505                 goto out;
2506         }
2507
2508         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2509                 u32 flags;
2510                 int i;
2511
2512                 if (fiber_autoneg(tp, &flags)) {
2513                         u32 local_adv, remote_adv;
2514
2515                         local_adv = ADVERTISE_PAUSE_CAP;
2516                         remote_adv = 0;
2517                         if (flags & MR_LP_ADV_SYM_PAUSE)
2518                                 remote_adv |= LPA_PAUSE_CAP;
2519                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2520                                 remote_adv |= LPA_PAUSE_ASYM;
2521
2522                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2523
2524                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2525                         current_link_up = 1;
2526                 }
2527                 for (i = 0; i < 30; i++) {
2528                         udelay(20);
2529                         tw32_f(MAC_STATUS,
2530                                (MAC_STATUS_SYNC_CHANGED |
2531                                 MAC_STATUS_CFG_CHANGED));
2532                         udelay(40);
2533                         if ((tr32(MAC_STATUS) &
2534                              (MAC_STATUS_SYNC_CHANGED |
2535                               MAC_STATUS_CFG_CHANGED)) == 0)
2536                                 break;
2537                 }
2538
2539                 mac_status = tr32(MAC_STATUS);
2540                 if (current_link_up == 0 &&
2541                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2542                     !(mac_status & MAC_STATUS_RCVD_CFG))
2543                         current_link_up = 1;
2544         } else {
2545                 /* Forcing 1000FD link up. */
2546                 current_link_up = 1;
2547                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2548
2549                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2550                 udelay(40);
2551         }
2552
2553 out:
2554         return current_link_up;
2555 }
2556
2557 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2558 {
2559         u32 orig_pause_cfg;
2560         u16 orig_active_speed;
2561         u8 orig_active_duplex;
2562         u32 mac_status;
2563         int current_link_up;
2564         int i;
2565
2566         orig_pause_cfg =
2567                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2568                                   TG3_FLAG_TX_PAUSE));
2569         orig_active_speed = tp->link_config.active_speed;
2570         orig_active_duplex = tp->link_config.active_duplex;
2571
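             /* Fast path: when hardware autoneg is not in use, the device is
              * fully initialized, the carrier is already up and the MAC still
              * reports a synced signal with no pending config changes, there
              * is nothing to renegotiate; just ack the status-change bits.
              */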
2572         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2573             netif_carrier_ok(tp->dev) &&
2574             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2575                 mac_status = tr32(MAC_STATUS);
2576                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2577                                MAC_STATUS_SIGNAL_DET |
2578                                MAC_STATUS_CFG_CHANGED |
2579                                MAC_STATUS_RCVD_CFG);
2580                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2581                                    MAC_STATUS_SIGNAL_DET)) {
2582                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2583                                             MAC_STATUS_CFG_CHANGED));
2584                         return 0;
2585                 }
2586         }
2587
2588         tw32_f(MAC_TX_AUTO_NEG, 0);
2589
2590         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2591         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2592         tw32_f(MAC_MODE, tp->mac_mode);
2593         udelay(40);
2594
2595         if (tp->phy_id == PHY_ID_BCM8002)
2596                 tg3_init_bcm8002(tp);
2597
2598         /* Enable link change event even when serdes polling.  */
2599         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2600         udelay(40);
2601
2602         current_link_up = 0;
2603         mac_status = tr32(MAC_STATUS);
2604
2605         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2606                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2607         else
2608                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2609
2610         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2611         tw32_f(MAC_MODE, tp->mac_mode);
2612         udelay(40);
2613
2614         tp->hw_status->status =
2615                 (SD_STATUS_UPDATED |
2616                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2617
2618         for (i = 0; i < 100; i++) {
2619                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2620                                     MAC_STATUS_CFG_CHANGED));
2621                 udelay(5);
2622                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2623                                          MAC_STATUS_CFG_CHANGED)) == 0)
2624                         break;
2625         }
2626
2627         mac_status = tr32(MAC_STATUS);
2628         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2629                 current_link_up = 0;
2630                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2631                         tw32_f(MAC_MODE, (tp->mac_mode |
2632                                           MAC_MODE_SEND_CONFIGS));
2633                         udelay(1);
2634                         tw32_f(MAC_MODE, tp->mac_mode);
2635                 }
2636         }
2637
2638         if (current_link_up == 1) {
2639                 tp->link_config.active_speed = SPEED_1000;
2640                 tp->link_config.active_duplex = DUPLEX_FULL;
2641                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2642                                     LED_CTRL_LNKLED_OVERRIDE |
2643                                     LED_CTRL_1000MBPS_ON));
2644         } else {
2645                 tp->link_config.active_speed = SPEED_INVALID;
2646                 tp->link_config.active_duplex = DUPLEX_INVALID;
2647                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2648                                     LED_CTRL_LNKLED_OVERRIDE |
2649                                     LED_CTRL_TRAFFIC_OVERRIDE));
2650         }
2651
2652         if (current_link_up != netif_carrier_ok(tp->dev)) {
2653                 if (current_link_up)
2654                         netif_carrier_on(tp->dev);
2655                 else
2656                         netif_carrier_off(tp->dev);
2657                 tg3_link_report(tp);
2658         } else {
2659                 u32 now_pause_cfg =
2660                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2661                                          TG3_FLAG_TX_PAUSE);
2662                 if (orig_pause_cfg != now_pause_cfg ||
2663                     orig_active_speed != tp->link_config.active_speed ||
2664                     orig_active_duplex != tp->link_config.active_duplex)
2665                         tg3_link_report(tp);
2666         }
2667
2668         return 0;
2669 }
2670
2671 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2672 {
2673         int current_link_up, err = 0;
2674         u32 bmsr, bmcr;
2675         u16 current_speed;
2676         u8 current_duplex;
2677
2678         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2679         tw32_f(MAC_MODE, tp->mac_mode);
2680         udelay(40);
2681
2682         tw32(MAC_EVENT, 0);
2683
2684         tw32_f(MAC_STATUS,
2685              (MAC_STATUS_SYNC_CHANGED |
2686               MAC_STATUS_CFG_CHANGED |
2687               MAC_STATUS_MI_COMPLETION |
2688               MAC_STATUS_LNKSTATE_CHANGED));
2689         udelay(40);
2690
2691         if (force_reset)
2692                 tg3_phy_reset(tp);
2693
2694         current_link_up = 0;
2695         current_speed = SPEED_INVALID;
2696         current_duplex = DUPLEX_INVALID;
2697
2698         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2699         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2700         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2701                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2702                         bmsr |= BMSR_LSTATUS;
2703                 else
2704                         bmsr &= ~BMSR_LSTATUS;
2705         }
2706
2707         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2708
2709         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2710             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2711                 /* do nothing, just check for link up at the end */
2712         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2713                 u32 adv, new_adv;
2714
2715                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2716                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2717                                   ADVERTISE_1000XPAUSE |
2718                                   ADVERTISE_1000XPSE_ASYM |
2719                                   ADVERTISE_SLCT);
2720
2721                 /* Always advertise symmetric PAUSE just like copper */
2722                 new_adv |= ADVERTISE_1000XPAUSE;
2723
2724                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2725                         new_adv |= ADVERTISE_1000XHALF;
2726                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2727                         new_adv |= ADVERTISE_1000XFULL;
2728
2729                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2730                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2731                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2732                         tg3_writephy(tp, MII_BMCR, bmcr);
2733
2734                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2735                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2736                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2737
2738                         return err;
2739                 }
2740         } else {
2741                 u32 new_bmcr;
2742
2743                 bmcr &= ~BMCR_SPEED1000;
2744                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2745
2746                 if (tp->link_config.duplex == DUPLEX_FULL)
2747                         new_bmcr |= BMCR_FULLDPLX;
2748
2749                 if (new_bmcr != bmcr) {
2750                         /* BMCR_SPEED1000 is a reserved bit that needs
2751                          * to be set on write.
2752                          */
2753                         new_bmcr |= BMCR_SPEED1000;
2754
2755                         /* Force a linkdown */
2756                         if (netif_carrier_ok(tp->dev)) {
2757                                 u32 adv;
2758
2759                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2760                                 adv &= ~(ADVERTISE_1000XFULL |
2761                                          ADVERTISE_1000XHALF |
2762                                          ADVERTISE_SLCT);
2763                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2764                                 tg3_writephy(tp, MII_BMCR, bmcr |
2765                                                            BMCR_ANRESTART |
2766                                                            BMCR_ANENABLE);
2767                                 udelay(10);
2768                                 netif_carrier_off(tp->dev);
2769                         }
2770                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2771                         bmcr = new_bmcr;
2772                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2773                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2774                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2775                             ASIC_REV_5714) {
2776                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2777                                         bmsr |= BMSR_LSTATUS;
2778                                 else
2779                                         bmsr &= ~BMSR_LSTATUS;
2780                         }
2781                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2782                 }
2783         }
2784
2785         if (bmsr & BMSR_LSTATUS) {
2786                 current_speed = SPEED_1000;
2787                 current_link_up = 1;
2788                 if (bmcr & BMCR_FULLDPLX)
2789                         current_duplex = DUPLEX_FULL;
2790                 else
2791                         current_duplex = DUPLEX_HALF;
2792
2793                 if (bmcr & BMCR_ANENABLE) {
2794                         u32 local_adv, remote_adv, common;
2795
2796                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2797                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2798                         common = local_adv & remote_adv;
2799                         if (common & (ADVERTISE_1000XHALF |
2800                                       ADVERTISE_1000XFULL)) {
2801                                 if (common & ADVERTISE_1000XFULL)
2802                                         current_duplex = DUPLEX_FULL;
2803                                 else
2804                                         current_duplex = DUPLEX_HALF;
2805
2806                                 tg3_setup_flow_control(tp, local_adv,
2807                                                        remote_adv);
2808                         }
2809                         else
2810                                 current_link_up = 0;
2811                 }
2812         }
2813
2814         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2815         if (tp->link_config.active_duplex == DUPLEX_HALF)
2816                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2817
2818         tw32_f(MAC_MODE, tp->mac_mode);
2819         udelay(40);
2820
2821         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2822
2823         tp->link_config.active_speed = current_speed;
2824         tp->link_config.active_duplex = current_duplex;
2825
2826         if (current_link_up != netif_carrier_ok(tp->dev)) {
2827                 if (current_link_up)
2828                         netif_carrier_on(tp->dev);
2829                 else {
2830                         netif_carrier_off(tp->dev);
2831                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2832                 }
2833                 tg3_link_report(tp);
2834         }
2835         return err;
2836 }
2837
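/* For SerDes links: if autoneg has not brought the link up but the
 * PHY reports signal detect without incoming config code words, the
 * partner is not autonegotiating, so force 1000/full and flag the
 * link as established by parallel detection.  If config code words
 * show up again later, autoneg is re-enabled.
 */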
2838 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2839 {
2840         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2841                 /* Give autoneg time to complete. */
2842                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2843                 return;
2844         }
2845         if (!netif_carrier_ok(tp->dev) &&
2846             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2847                 u32 bmcr;
2848
2849                 tg3_readphy(tp, MII_BMCR, &bmcr);
2850                 if (bmcr & BMCR_ANENABLE) {
2851                         u32 phy1, phy2;
2852
2853                         /* Select shadow register 0x1f */
2854                         tg3_writephy(tp, 0x1c, 0x7c00);
2855                         tg3_readphy(tp, 0x1c, &phy1);
2856
2857                         /* Select expansion interrupt status register */
2858                         tg3_writephy(tp, 0x17, 0x0f01);
2859                         tg3_readphy(tp, 0x15, &phy2);
2860                         tg3_readphy(tp, 0x15, &phy2);
2861
2862                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2863                         /* We have signal detect and are not receiving
2864                          * config code words, so the link is up by
2865                          * parallel detection.
2866                                  */
2867
2868                                 bmcr &= ~BMCR_ANENABLE;
2869                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2870                                 tg3_writephy(tp, MII_BMCR, bmcr);
2871                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2872                         }
2873                 }
2874         }
2875         else if (netif_carrier_ok(tp->dev) &&
2876                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2877                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2878                 u32 phy2;
2879
2880                 /* Select expansion interrupt status register */
2881                 tg3_writephy(tp, 0x17, 0x0f01);
2882                 tg3_readphy(tp, 0x15, &phy2);
2883                 if (phy2 & 0x20) {
2884                         u32 bmcr;
2885
2886                         /* Config code words received, turn on autoneg. */
2887                         tg3_readphy(tp, MII_BMCR, &bmcr);
2888                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2889
2890                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2891
2892                 }
2893         }
2894 }
2895
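/* Top-level link (re)configuration: dispatch to the fiber, MII-SerDes
 * or copper setup routine, then adjust the MAC transmit slot time for
 * 1000/half links and the statistics coalescing ticks according to
 * carrier state.
 */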
2896 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2897 {
2898         int err;
2899
2900         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2901                 err = tg3_setup_fiber_phy(tp, force_reset);
2902         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2903                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2904         } else {
2905                 err = tg3_setup_copper_phy(tp, force_reset);
2906         }
2907
2908         if (tp->link_config.active_speed == SPEED_1000 &&
2909             tp->link_config.active_duplex == DUPLEX_HALF)
2910                 tw32(MAC_TX_LENGTHS,
2911                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2912                       (6 << TX_LENGTHS_IPG_SHIFT) |
2913                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2914         else
2915                 tw32(MAC_TX_LENGTHS,
2916                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2917                       (6 << TX_LENGTHS_IPG_SHIFT) |
2918                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2919
2920         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2921                 if (netif_carrier_ok(tp->dev)) {
2922                         tw32(HOSTCC_STAT_COAL_TICKS,
2923                              tp->coal.stats_block_coalesce_usecs);
2924                 } else {
2925                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2926                 }
2927         }
2928
2929         return err;
2930 }
2931
2932 /* Tigon3 never reports partial packet sends.  So we do not
2933  * need special logic to handle SKBs that have not had all
2934  * of their frags sent yet, like SunGEM does.
2935  */
2936 static void tg3_tx(struct tg3 *tp)
2937 {
2938         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2939         u32 sw_idx = tp->tx_cons;
2940
2941         while (sw_idx != hw_idx) {
2942                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2943                 struct sk_buff *skb = ri->skb;
2944                 int i;
2945
2946                 if (unlikely(skb == NULL))
2947                         BUG();
2948
2949                 pci_unmap_single(tp->pdev,
2950                                  pci_unmap_addr(ri, mapping),
2951                                  skb_headlen(skb),
2952                                  PCI_DMA_TODEVICE);
2953
2954                 ri->skb = NULL;
2955
2956                 sw_idx = NEXT_TX(sw_idx);
2957
2958                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2959                         if (unlikely(sw_idx == hw_idx))
2960                                 BUG();
2961
2962                         ri = &tp->tx_buffers[sw_idx];
2963                         if (unlikely(ri->skb != NULL))
2964                                 BUG();
2965
2966                         pci_unmap_page(tp->pdev,
2967                                        pci_unmap_addr(ri, mapping),
2968                                        skb_shinfo(skb)->frags[i].size,
2969                                        PCI_DMA_TODEVICE);
2970
2971                         sw_idx = NEXT_TX(sw_idx);
2972                 }
2973
2974                 dev_kfree_skb(skb);
2975         }
2976
2977         tp->tx_cons = sw_idx;
2978
2979         if (unlikely(netif_queue_stopped(tp->dev))) {
2980                 spin_lock(&tp->tx_lock);
2981                 if (netif_queue_stopped(tp->dev) &&
2982                     (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2983                         netif_wake_queue(tp->dev);
2984                 spin_unlock(&tp->tx_lock);
2985         }
2986 }
2987
2988 /* Returns size of skb allocated or < 0 on error.
2989  *
2990  * We only need to fill in the address because the other members
2991  * of the RX descriptor are invariant, see tg3_init_rings.
2992  *
2993  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2994  * posting buffers we only dirty the first cache line of the RX
2995  * descriptor (containing the address).  Whereas for the RX status
2996  * buffers the cpu only reads the last cacheline of the RX descriptor
2997  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2998  */
2999 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3000                             int src_idx, u32 dest_idx_unmasked)
3001 {
3002         struct tg3_rx_buffer_desc *desc;
3003         struct ring_info *map, *src_map;
3004         struct sk_buff *skb;
3005         dma_addr_t mapping;
3006         int skb_size, dest_idx;
3007
3008         src_map = NULL;
3009         switch (opaque_key) {
3010         case RXD_OPAQUE_RING_STD:
3011                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3012                 desc = &tp->rx_std[dest_idx];
3013                 map = &tp->rx_std_buffers[dest_idx];
3014                 if (src_idx >= 0)
3015                         src_map = &tp->rx_std_buffers[src_idx];
3016                 skb_size = tp->rx_pkt_buf_sz;
3017                 break;
3018
3019         case RXD_OPAQUE_RING_JUMBO:
3020                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3021                 desc = &tp->rx_jumbo[dest_idx];
3022                 map = &tp->rx_jumbo_buffers[dest_idx];
3023                 if (src_idx >= 0)
3024                         src_map = &tp->rx_jumbo_buffers[src_idx];
3025                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3026                 break;
3027
3028         default:
3029                 return -EINVAL;
3030         }
3031
3032         /* Do not overwrite any of the map or rp information
3033          * until we are sure we can commit to a new buffer.
3034          *
3035          * Callers depend upon this behavior and assume that
3036          * we leave everything unchanged if we fail.
3037          */
3038         skb = dev_alloc_skb(skb_size);
3039         if (skb == NULL)
3040                 return -ENOMEM;
3041
3042         skb->dev = tp->dev;
3043         skb_reserve(skb, tp->rx_offset);
3044
3045         mapping = pci_map_single(tp->pdev, skb->data,
3046                                  skb_size - tp->rx_offset,
3047                                  PCI_DMA_FROMDEVICE);
3048
3049         map->skb = skb;
3050         pci_unmap_addr_set(map, mapping, mapping);
3051
3052         if (src_map != NULL)
3053                 src_map->skb = NULL;
3054
3055         desc->addr_hi = ((u64)mapping >> 32);
3056         desc->addr_lo = ((u64)mapping & 0xffffffff);
3057
3058         return skb_size;
3059 }
3060
3061 /* We only need to move the address over because the other
3062  * members of the RX descriptor are invariant.  See notes above
3063  * tg3_alloc_rx_skb for full details.
3064  */
3065 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3066                            int src_idx, u32 dest_idx_unmasked)
3067 {
3068         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3069         struct ring_info *src_map, *dest_map;
3070         int dest_idx;
3071
3072         switch (opaque_key) {
3073         case RXD_OPAQUE_RING_STD:
3074                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3075                 dest_desc = &tp->rx_std[dest_idx];
3076                 dest_map = &tp->rx_std_buffers[dest_idx];
3077                 src_desc = &tp->rx_std[src_idx];
3078                 src_map = &tp->rx_std_buffers[src_idx];
3079                 break;
3080
3081         case RXD_OPAQUE_RING_JUMBO:
3082                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3083                 dest_desc = &tp->rx_jumbo[dest_idx];
3084                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3085                 src_desc = &tp->rx_jumbo[src_idx];
3086                 src_map = &tp->rx_jumbo_buffers[src_idx];
3087                 break;
3088
3089         default:
3090                 return;
3091         }
3092
3093         dest_map->skb = src_map->skb;
3094         pci_unmap_addr_set(dest_map, mapping,
3095                            pci_unmap_addr(src_map, mapping));
3096         dest_desc->addr_hi = src_desc->addr_hi;
3097         dest_desc->addr_lo = src_desc->addr_lo;
3098
3099         src_map->skb = NULL;
3100 }
3101
3102 #if TG3_VLAN_TAG_USED
3103 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3104 {
3105         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3106 }
3107 #endif
3108
3109 /* The RX ring scheme is composed of multiple rings which post fresh
3110  * buffers to the chip, and one special ring the chip uses to report
3111  * status back to the host.
3112  *
3113  * The special ring reports the status of received packets to the
3114  * host.  The chip does not write into the original descriptor the
3115  * RX buffer was obtained from.  The chip simply takes the original
3116  * descriptor as provided by the host, updates the status and length
3117  * field, then writes this into the next status ring entry.
3118  *
3119  * Each ring the host uses to post buffers to the chip is described
3120  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3121  * it is first placed into on-chip RAM.  Once the packet's length is
3122  * known, the chip walks down the TG3_BDINFO entries to select the ring:
3123  * each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
3124  * whose MAXLEN covers the new packet's length is chosen.
3125  *
3126  * The "separate ring for rx status" scheme may sound queer, but it makes
3127  * sense from a cache coherency perspective.  If only the host writes
3128  * to the buffer post rings, and only the chip writes to the rx status
3129  * rings, then cache lines never move beyond shared-modified state.
3130  * If both the host and chip were to write into the same ring, cache line
3131  * eviction could occur since both entities want it in an exclusive state.
3132  */
3133 static int tg3_rx(struct tg3 *tp, int budget)
3134 {
3135         u32 work_mask;
3136         u32 sw_idx = tp->rx_rcb_ptr;
3137         u16 hw_idx;
3138         int received;
3139
3140         hw_idx = tp->hw_status->idx[0].rx_producer;
3141         /*
3142          * We need to order the read of hw_idx and the read of
3143          * the opaque cookie.
3144          */
3145         rmb();
3146         work_mask = 0;
3147         received = 0;
3148         while (sw_idx != hw_idx && budget > 0) {
3149                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3150                 unsigned int len;
3151                 struct sk_buff *skb;
3152                 dma_addr_t dma_addr;
3153                 u32 opaque_key, desc_idx, *post_ptr;
3154
3155                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3156                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3157                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3158                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3159                                                   mapping);
3160                         skb = tp->rx_std_buffers[desc_idx].skb;
3161                         post_ptr = &tp->rx_std_ptr;
3162                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3163                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3164                                                   mapping);
3165                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3166                         post_ptr = &tp->rx_jumbo_ptr;
3167                 }
3168                 else {
3169                         goto next_pkt_nopost;
3170                 }
3171
3172                 work_mask |= opaque_key;
3173
3174                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3175                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3176                 drop_it:
3177                         tg3_recycle_rx(tp, opaque_key,
3178                                        desc_idx, *post_ptr);
3179                 drop_it_no_recycle:
3180                         /* Other statistics kept track of by card. */
3181                         tp->net_stats.rx_dropped++;
3182                         goto next_pkt;
3183                 }
3184
3185                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3186
3187                 /* rx_offset != 2 iff this is a 5701 card running
3188                  * in PCI-X mode [see tg3_get_invariants()]
3189                  */
3190                 if (len > RX_COPY_THRESHOLD && tp->rx_offset == 2) {
3192                         int skb_size;
3193
3194                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3195                                                     desc_idx, *post_ptr);
3196                         if (skb_size < 0)
3197                                 goto drop_it;
3198
3199                         pci_unmap_single(tp->pdev, dma_addr,
3200                                          skb_size - tp->rx_offset,
3201                                          PCI_DMA_FROMDEVICE);
3202
3203                         skb_put(skb, len);
3204                 } else {
3205                         struct sk_buff *copy_skb;
3206
3207                         tg3_recycle_rx(tp, opaque_key,
3208                                        desc_idx, *post_ptr);
3209
3210                         copy_skb = dev_alloc_skb(len + 2);
3211                         if (copy_skb == NULL)
3212                                 goto drop_it_no_recycle;
3213
3214                         copy_skb->dev = tp->dev;
3215                         skb_reserve(copy_skb, 2);
3216                         skb_put(copy_skb, len);
3217                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3218                         memcpy(copy_skb->data, skb->data, len);
3219                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3220
3221                         /* We'll reuse the original ring buffer. */
3222                         skb = copy_skb;
3223                 }
3224
3225                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3226                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3227                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3228                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3229                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3230                 else
3231                         skb->ip_summed = CHECKSUM_NONE;
3232
3233                 skb->protocol = eth_type_trans(skb, tp->dev);
3234 #if TG3_VLAN_TAG_USED
3235                 if (tp->vlgrp != NULL &&
3236                     desc->type_flags & RXD_FLAG_VLAN) {
3237                         tg3_vlan_rx(tp, skb,
3238                                     desc->err_vlan & RXD_VLAN_MASK);
3239                 } else
3240 #endif
3241                         netif_receive_skb(skb);
3242
3243                 tp->dev->last_rx = jiffies;
3244                 received++;
3245                 budget--;
3246
3247 next_pkt:
3248                 (*post_ptr)++;
3249 next_pkt_nopost:
3250                 sw_idx++;
3251                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3252
3253                 /* Refresh hw_idx to see if there is new work */
3254                 if (sw_idx == hw_idx) {
3255                         hw_idx = tp->hw_status->idx[0].rx_producer;
3256                         rmb();
3257                 }
3258         }
3259
3260         /* ACK the status ring. */
3261         tp->rx_rcb_ptr = sw_idx;
3262         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3263
3264         /* Refill RX ring(s). */
3265         if (work_mask & RXD_OPAQUE_RING_STD) {
3266                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3267                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3268                              sw_idx);
3269         }
3270         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3271                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3272                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3273                              sw_idx);
3274         }
3275         mmiowb();
3276
3277         return received;
3278 }
3279
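/* NAPI poll callback: handle link-change events reported in the status
 * block, reap completed TX descriptors, process RX packets within the
 * budget, and re-enable chip interrupts once no work remains.
 */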
3280 static int tg3_poll(struct net_device *netdev, int *budget)
3281 {
3282         struct tg3 *tp = netdev_priv(netdev);
3283         struct tg3_hw_status *sblk = tp->hw_status;
3284         int done;
3285
3286         /* handle link change and other phy events */
3287         if (!(tp->tg3_flags &
3288               (TG3_FLAG_USE_LINKCHG_REG |
3289                TG3_FLAG_POLL_SERDES))) {
3290                 if (sblk->status & SD_STATUS_LINK_CHG) {
3291                         sblk->status = SD_STATUS_UPDATED |
3292                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3293                         spin_lock(&tp->lock);
3294                         tg3_setup_phy(tp, 0);
3295                         spin_unlock(&tp->lock);
3296                 }
3297         }
3298
3299         /* run TX completion thread */
3300         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3301                 tg3_tx(tp);
3302         }
3303
3304         /* run RX thread, within the bounds set by NAPI.
3305          * All RX "locking" is done by ensuring outside
3306          * code synchronizes with dev->poll()
3307          */
3308         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3309                 int orig_budget = *budget;
3310                 int work_done;
3311
3312                 if (orig_budget > netdev->quota)
3313                         orig_budget = netdev->quota;
3314
3315                 work_done = tg3_rx(tp, orig_budget);
3316
3317                 *budget -= work_done;
3318                 netdev->quota -= work_done;
3319         }
3320
3321         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3322                 tp->last_tag = sblk->status_tag;
3323                 rmb();
3324         } else
3325                 sblk->status &= ~SD_STATUS_UPDATED;
3326
3327         /* if no more work, tell net stack and NIC we're done */
3328         done = !tg3_has_work(tp);
3329         if (done) {
3330                 netif_rx_complete(netdev);
3331                 tg3_restart_ints(tp);
3332         }
3333
3334         return (done ? 0 : 1);
3335 }
3336
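/* Tell the interrupt handlers to do nothing (irq_sync) and wait for
 * any handler that is already running to finish, so callers of
 * tg3_full_lock(tp, 1) do not race the interrupt path.
 */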
3337 static void tg3_irq_quiesce(struct tg3 *tp)
3338 {
3339         BUG_ON(tp->irq_sync);
3340
3341         tp->irq_sync = 1;
3342         smp_mb();
3343
3344         synchronize_irq(tp->pdev->irq);
3345 }
3346
3347 static inline int tg3_irq_sync(struct tg3 *tp)
3348 {
3349         return tp->irq_sync;
3350 }
3351
3352 /* Fully shut down all tg3 driver activity elsewhere in the system.
3353  * If irq_sync is non-zero, the IRQ handler is quiesced and synchronized
3354  * with as well.  Most of the time this is only necessary when shutting
3355  * down the device.
3356  */
3357 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3358 {
3359         if (irq_sync)
3360                 tg3_irq_quiesce(tp);
3361         spin_lock_bh(&tp->lock);
3362         spin_lock(&tp->tx_lock);
3363 }
3364
3365 static inline void tg3_full_unlock(struct tg3 *tp)
3366 {
3367         spin_unlock(&tp->tx_lock);
3368         spin_unlock_bh(&tp->lock);
3369 }
3370
3371 /* One-shot MSI handler - Chip automatically disables interrupt
3372  * after sending MSI so driver doesn't have to do it.
3373  */
3374 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3375 {
3376         struct net_device *dev = dev_id;
3377         struct tg3 *tp = netdev_priv(dev);
3378
3379         prefetch(tp->hw_status);
3380         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3381
3382         if (likely(!tg3_irq_sync(tp)))
3383                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3384
3385         return IRQ_HANDLED;
3386 }
3387
3388 /* MSI ISR - No need to check for interrupt sharing and no need to
3389  * flush status block and interrupt mailbox. PCI ordering rules
3390  * guarantee that MSI will arrive after the status block.
3391  */
3392 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3393 {
3394         struct net_device *dev = dev_id;
3395         struct tg3 *tp = netdev_priv(dev);
3396
3397         prefetch(tp->hw_status);
3398         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3399         /*
3400          * Writing any value to intr-mbox-0 clears PCI INTA# and
3401          * chip-internal interrupt pending events.
3402          * Writing non-zero to intr-mbox-0 additionally tells the
3403          * NIC to stop sending us irqs, engaging "in-intr-handler"
3404          * event coalescing.
3405          */
3406         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3407         if (likely(!tg3_irq_sync(tp)))
3408                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3409
3410         return IRQ_RETVAL(1);
3411 }
3412
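/* INTx handler for chips not using tagged status.  The status block
 * (or the PCI state register, if the block has not reached memory yet)
 * tells us whether the interrupt is ours; if so, mask further chip
 * interrupts via the mailbox and schedule the NAPI poll.
 */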
3413 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3414 {
3415         struct net_device *dev = dev_id;
3416         struct tg3 *tp = netdev_priv(dev);
3417         struct tg3_hw_status *sblk = tp->hw_status;
3418         unsigned int handled = 1;
3419
3420         /* In INTx mode, the interrupt can arrive at the CPU before the
3421          * status block posted prior to the interrupt has reached memory.
3422          * Reading the PCI State register will confirm whether the
3423          * interrupt is ours and will flush the status block.
3424          */
3425         if ((sblk->status & SD_STATUS_UPDATED) ||
3426             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3427                 /*
3428                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3429                  * chip-internal interrupt pending events.
3430                  * Writing non-zero to intr-mbox-0 additionally tells the
3431                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3432                  * event coalescing.
3433                  */
3434                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3435                              0x00000001);
3436                 if (tg3_irq_sync(tp))
3437                         goto out;
3438                 sblk->status &= ~SD_STATUS_UPDATED;
3439                 if (likely(tg3_has_work(tp))) {
3440                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3441                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3442                 } else {
3443                         /* No work, shared interrupt perhaps?  re-enable
3444                          * interrupts, and flush that PCI write
3445                          */
3446                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3447                                 0x00000000);
3448                 }
3449         } else {        /* shared interrupt */
3450                 handled = 0;
3451         }
3452 out:
3453         return IRQ_RETVAL(handled);
3454 }
3455
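/* INTx handler for chips using tagged status: a status_tag that
 * differs from last_tag (rather than SD_STATUS_UPDATED) indicates
 * new work.
 */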
3456 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3457 {
3458         struct net_device *dev = dev_id;
3459         struct tg3 *tp = netdev_priv(dev);
3460         struct tg3_hw_status *sblk = tp->hw_status;
3461         unsigned int handled = 1;
3462
3463         /* In INTx mode, the interrupt can arrive at the CPU before the
3464          * status block posted prior to the interrupt has reached memory.
3465          * Reading the PCI State register will confirm whether the
3466          * interrupt is ours and will flush the status block.
3467          */
3468         if ((sblk->status_tag != tp->last_tag) ||
3469             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3470                 /*
3471                  * writing any value to intr-mbox-0 clears PCI INTA# and
3472                  * chip-internal interrupt pending events.
3473                  * writing non-zero to intr-mbox-0 additionally tells the
3474                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3475                  * event coalescing.
3476                  */
3477                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3478                              0x00000001);
3479                 if (tg3_irq_sync(tp))
3480                         goto out;
3481                 if (netif_rx_schedule_prep(dev)) {
3482                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3483                         /* Update last_tag to mark that this status has been
3484                          * seen. Because interrupt may be shared, we may be
3485                          * racing with tg3_poll(), so only update last_tag
3486                          * if tg3_poll() is not scheduled.
3487                          */
3488                         tp->last_tag = sblk->status_tag;
3489                         __netif_rx_schedule(dev);
3490                 }
3491         } else {        /* shared interrupt */
3492                 handled = 0;
3493         }
3494 out:
3495         return IRQ_RETVAL(handled);
3496 }
3497
3498 /* ISR for interrupt test */
3499 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3500                 struct pt_regs *regs)
3501 {
3502         struct net_device *dev = dev_id;
3503         struct tg3 *tp = netdev_priv(dev);
3504         struct tg3_hw_status *sblk = tp->hw_status;
3505
3506         if ((sblk->status & SD_STATUS_UPDATED) ||
3507             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3508                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3509                              0x00000001);
3510                 return IRQ_RETVAL(1);
3511         }
3512         return IRQ_RETVAL(0);
3513 }
3514
3515 static int tg3_init_hw(struct tg3 *);
3516 static int tg3_halt(struct tg3 *, int, int);
3517
3518 #ifdef CONFIG_NET_POLL_CONTROLLER
3519 static void tg3_poll_controller(struct net_device *dev)
3520 {
3521         struct tg3 *tp = netdev_priv(dev);
3522
3523         tg3_interrupt(tp->pdev->irq, dev, NULL);
3524 }
3525 #endif
3526
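/* Work queue handler run via tp->reset_task (scheduled, for example,
 * from tg3_tx_timeout()): stop the interface, halt and re-initialize
 * the chip under the full lock, then restart it, re-arming the timer
 * if a restart was pending.
 */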
3527 static void tg3_reset_task(void *_data)
3528 {
3529         struct tg3 *tp = _data;
3530         unsigned int restart_timer;
3531
3532         tg3_full_lock(tp, 0);
3533         tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3534
3535         if (!netif_running(tp->dev)) {
3536                 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3537                 tg3_full_unlock(tp);
3538                 return;
3539         }
3540
3541         tg3_full_unlock(tp);
3542
3543         tg3_netif_stop(tp);
3544
3545         tg3_full_lock(tp, 1);
3546
3547         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3548         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3549
3550         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3551         tg3_init_hw(tp);
3552
3553         tg3_netif_start(tp);
3554
3555         if (restart_timer)
3556                 mod_timer(&tp->timer, jiffies + 1);
3557
3558         tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3559
3560         tg3_full_unlock(tp);
3561 }
3562
3563 static void tg3_tx_timeout(struct net_device *dev)
3564 {
3565         struct tg3 *tp = netdev_priv(dev);
3566
3567         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3568                dev->name);
3569
3570         schedule_work(&tp->reset_task);
3571 }
3572
3573 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3574 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3575 {
3576         u32 base = (u32) mapping & 0xffffffff;
3577
3578         return ((base > 0xffffdcc0) &&
3579                 (base + len + 8 < base));
3580 }
3581
3582 /* Test for DMA addresses > 40-bit */
3583 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3584                                           int len)
3585 {
3586 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3587         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3588                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3589         return 0;
3590 #else
3591         return 0;
3592 #endif
3593 }
3594
3595 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3596
3597 /* Work around the 4GB and 40-bit hardware DMA bugs. */
3598 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3599                                        u32 last_plus_one, u32 *start,
3600                                        u32 base_flags, u32 mss)
3601 {
3602         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3603         dma_addr_t new_addr = 0;
3604         u32 entry = *start;
3605         int i, ret = 0;
3606
3607         if (!new_skb) {
3608                 ret = -1;
3609         } else {
3610                 /* New SKB is guaranteed to be linear. */
3611                 entry = *start;
3612                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3613                                           PCI_DMA_TODEVICE);
3614                 /* Make sure new skb does not cross any 4G boundaries.
3615                  * Drop the packet if it does.
3616                  */
3617                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3618                         ret = -1;
3619                         dev_kfree_skb(new_skb);
3620                         new_skb = NULL;
3621                 } else {
3622                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3623                                     base_flags, 1 | (mss << 1));
3624                         *start = NEXT_TX(entry);
3625                 }
3626         }
3627
3628         /* Now clean up the sw ring entries. */
3629         i = 0;
3630         while (entry != last_plus_one) {
3631                 int len;
3632
3633                 if (i == 0)
3634                         len = skb_headlen(skb);
3635                 else
3636                         len = skb_shinfo(skb)->frags[i-1].size;
3637                 pci_unmap_single(tp->pdev,
3638                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3639                                  len, PCI_DMA_TODEVICE);
3640                 if (i == 0) {
3641                         tp->tx_buffers[entry].skb = new_skb;
3642                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3643                 } else {
3644                         tp->tx_buffers[entry].skb = NULL;
3645                 }
3646                 entry = NEXT_TX(entry);
3647                 i++;
3648         }
3649
3650         dev_kfree_skb(skb);
3651
3652         return ret;
3653 }
3654
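/* Fill in one transmit buffer descriptor: DMA address, length/flags,
 * VLAN tag and MSS.  mss_and_is_end packs the "last fragment" flag
 * into bit 0 and the MSS into the remaining upper bits.
 */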
3655 static void tg3_set_txd(struct tg3 *tp, int entry,
3656                         dma_addr_t mapping, int len, u32 flags,
3657                         u32 mss_and_is_end)
3658 {
3659         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3660         int is_end = (mss_and_is_end & 0x1);
3661         u32 mss = (mss_and_is_end >> 1);
3662         u32 vlan_tag = 0;
3663
3664         if (is_end)
3665                 flags |= TXD_FLAG_END;
3666         if (flags & TXD_FLAG_VLAN) {
3667                 vlan_tag = flags >> 16;
3668                 flags &= 0xffff;
3669         }
3670         vlan_tag |= (mss << TXD_MSS_SHIFT);
3671
3672         txd->addr_hi = ((u64) mapping >> 32);
3673         txd->addr_lo = ((u64) mapping & 0xffffffff);
3674         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3675         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3676 }
3677
3678 /* hard_start_xmit for devices that don't have any bugs and
3679  * support TG3_FLG2_HW_TSO_2 only.
3680  */
3681 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3682 {
3683         struct tg3 *tp = netdev_priv(dev);
3684         dma_addr_t mapping;
3685         u32 len, entry, base_flags, mss;
3686
3687         len = skb_headlen(skb);
3688
3689         /* No BH disabling for tx_lock here.  We are running in BH disabled
3690          * context and TX reclaim runs via tp->poll inside of a software
3691          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3692          * no IRQ context deadlocks to worry about either.  Rejoice!
3693          */
3694         if (!spin_trylock(&tp->tx_lock))
3695                 return NETDEV_TX_LOCKED;
3696
3697         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3698                 if (!netif_queue_stopped(dev)) {
3699                         netif_stop_queue(dev);
3700
3701                         /* This is a hard error, log it. */
3702                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3703                                "queue awake!\n", dev->name);
3704                 }
3705                 spin_unlock(&tp->tx_lock);
3706                 return NETDEV_TX_BUSY;
3707         }
3708
3709         entry = tp->tx_prod;
3710         base_flags = 0;
3711 #if TG3_TSO_SUPPORT != 0
3712         mss = 0;
3713         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3714             (mss = skb_shinfo(skb)->tso_size) != 0) {
3715                 int tcp_opt_len, ip_tcp_len;
3716
3717                 if (skb_header_cloned(skb) &&
3718                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3719                         dev_kfree_skb(skb);
3720                         goto out_unlock;
3721                 }
3722
3723                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3724                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3725
3726                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3727                                TXD_FLAG_CPU_POST_DMA);
3728
3729                 skb->nh.iph->check = 0;
3730                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3731
3732                 skb->h.th->check = 0;
3733
3734                 mss |= (ip_tcp_len + tcp_opt_len) << 9;
3735         }
3736         else if (skb->ip_summed == CHECKSUM_HW)
3737                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3738 #else
3739         mss = 0;
3740         if (skb->ip_summed == CHECKSUM_HW)
3741                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3742 #endif
3743 #if TG3_VLAN_TAG_USED
3744         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3745                 base_flags |= (TXD_FLAG_VLAN |
3746                                (vlan_tx_tag_get(skb) << 16));
3747 #endif
3748
3749         /* Queue skb data, a.k.a. the main skb fragment. */
3750         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3751
3752         tp->tx_buffers[entry].skb = skb;
3753         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3754
3755         tg3_set_txd(tp, entry, mapping, len, base_flags,
3756                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3757
3758         entry = NEXT_TX(entry);
3759
3760         /* Now loop through additional data fragments, and queue them. */
3761         if (skb_shinfo(skb)->nr_frags > 0) {
3762                 unsigned int i, last;
3763
3764                 last = skb_shinfo(skb)->nr_frags - 1;
3765                 for (i = 0; i <= last; i++) {
3766                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3767
3768                         len = frag->size;
3769                         mapping = pci_map_page(tp->pdev,
3770                                                frag->page,
3771                                                frag->page_offset,
3772                                                len, PCI_DMA_TODEVICE);
3773
3774                         tp->tx_buffers[entry].skb = NULL;
3775                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3776
3777                         tg3_set_txd(tp, entry, mapping, len,
3778                                     base_flags, (i == last) | (mss << 1));
3779
3780                         entry = NEXT_TX(entry);
3781                 }
3782         }
3783
3784         /* Packets are ready, update Tx producer idx local and on card. */
3785         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3786
3787         tp->tx_prod = entry;
3788         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3789                 netif_stop_queue(dev);
3790                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3791                         netif_wake_queue(tp->dev);
3792         }
3793
3794 out_unlock:
3795         mmiowb();
3796         spin_unlock(&tp->tx_lock);
3797
3798         dev->trans_start = jiffies;
3799
3800         return NETDEV_TX_OK;
3801 }
3802
3803 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3804  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3805  */
3806 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3807 {
3808         struct tg3 *tp = netdev_priv(dev);
3809         dma_addr_t mapping;
3810         u32 len, entry, base_flags, mss;
3811         int would_hit_hwbug;
3812
3813         len = skb_headlen(skb);
3814
3815         /* No BH disabling for tx_lock here.  We are running in BH disabled
3816          * context and TX reclaim runs via tp->poll inside of a software
3817          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3818          * no IRQ context deadlocks to worry about either.  Rejoice!
3819          */
3820         if (!spin_trylock(&tp->tx_lock))
3821                 return NETDEV_TX_LOCKED; 
3822
3823         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3824                 if (!netif_queue_stopped(dev)) {
3825                         netif_stop_queue(dev);
3826
3827                         /* This is a hard error, log it. */
3828                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3829                                "queue awake!\n", dev->name);
3830                 }
3831                 spin_unlock(&tp->tx_lock);
3832                 return NETDEV_TX_BUSY;
3833         }
3834
3835         entry = tp->tx_prod;
3836         base_flags = 0;
3837         if (skb->ip_summed == CHECKSUM_HW)
3838                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3839 #if TG3_TSO_SUPPORT != 0
3840         mss = 0;
3841         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3842             (mss = skb_shinfo(skb)->tso_size) != 0) {
3843                 int tcp_opt_len, ip_tcp_len;
3844
3845                 if (skb_header_cloned(skb) &&
3846                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3847                         dev_kfree_skb(skb);
3848                         goto out_unlock;
3849                 }
3850
3851                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3852                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3853
3854                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3855                                TXD_FLAG_CPU_POST_DMA);
3856
3857                 skb->nh.iph->check = 0;
3858                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3859                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3860                         skb->h.th->check = 0;
3861                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3862                 }
3863                 else {
3864                         skb->h.th->check =
3865                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3866                                                    skb->nh.iph->daddr,
3867                                                    0, IPPROTO_TCP, 0);
3868                 }
3869
3870                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3871                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3872                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3873                                 int tsflags;
3874
3875                                 tsflags = ((skb->nh.iph->ihl - 5) +
3876                                            (tcp_opt_len >> 2));
3877                                 mss |= (tsflags << 11);
3878                         }
3879                 } else {
3880                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3881                                 int tsflags;
3882
3883                                 tsflags = ((skb->nh.iph->ihl - 5) +
3884                                            (tcp_opt_len >> 2));
3885                                 base_flags |= tsflags << 12;
3886                         }
3887                 }
3888         }
3889 #else
3890         mss = 0;
3891 #endif
3892 #if TG3_VLAN_TAG_USED
3893         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3894                 base_flags |= (TXD_FLAG_VLAN |
3895                                (vlan_tx_tag_get(skb) << 16));
3896 #endif
3897
3898         /* Queue skb data, a.k.a. the main skb fragment. */
3899         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3900
3901         tp->tx_buffers[entry].skb = skb;
3902         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3903
3904         would_hit_hwbug = 0;
3905
3906         if (tg3_4g_overflow_test(mapping, len))
3907                 would_hit_hwbug = 1;
3908
3909         tg3_set_txd(tp, entry, mapping, len, base_flags,
3910                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3911
3912         entry = NEXT_TX(entry);
3913
3914         /* Now loop through additional data fragments, and queue them. */
3915         if (skb_shinfo(skb)->nr_frags > 0) {
3916                 unsigned int i, last;
3917
3918                 last = skb_shinfo(skb)->nr_frags - 1;
3919                 for (i = 0; i <= last; i++) {
3920                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3921
3922                         len = frag->size;
3923                         mapping = pci_map_page(tp->pdev,
3924                                                frag->page,
3925                                                frag->page_offset,
3926                                                len, PCI_DMA_TODEVICE);
3927
3928                         tp->tx_buffers[entry].skb = NULL;
3929                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3930
3931                         if (tg3_4g_overflow_test(mapping, len))
3932                                 would_hit_hwbug = 1;
3933
3934                         if (tg3_40bit_overflow_test(tp, mapping, len))
3935                                 would_hit_hwbug = 1;
3936
3937                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3938                                 tg3_set_txd(tp, entry, mapping, len,
3939                                             base_flags, (i == last)|(mss << 1));
3940                         else
3941                                 tg3_set_txd(tp, entry, mapping, len,
3942                                             base_flags, (i == last));
3943
3944                         entry = NEXT_TX(entry);
3945                 }
3946         }
3947
3948         if (would_hit_hwbug) {
3949                 u32 last_plus_one = entry;
3950                 u32 start;
3951
3952                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3953                 start &= (TG3_TX_RING_SIZE - 1);
3954
3955                 /* If the workaround fails due to memory/mapping
3956                  * failure, silently drop this packet.
3957                  */
3958                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
3959                                                 &start, base_flags, mss))
3960                         goto out_unlock;
3961
3962                 entry = start;
3963         }
3964
3965         /* Packets are ready, update Tx producer idx local and on card. */
3966         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3967
3968         tp->tx_prod = entry;
3969         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3970                 netif_stop_queue(dev);
3971                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3972                         netif_wake_queue(tp->dev);
3973         }
3974
3975 out_unlock:
3976         mmiowb();
3977         spin_unlock(&tp->tx_lock);
3978
3979         dev->trans_start = jiffies;
3980
3981         return NETDEV_TX_OK;
3982 }
3983
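/* Record the new MTU and update the related flags: jumbo MTUs enable
 * the jumbo RX ring on most parts, while 5780-class devices instead
 * drop TSO capability when the MTU exceeds ETH_DATA_LEN.
 */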
3984 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3985                                int new_mtu)
3986 {
3987         dev->mtu = new_mtu;
3988
3989         if (new_mtu > ETH_DATA_LEN) {
3990                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
3991                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3992                         ethtool_op_set_tso(dev, 0);
3993                 }
3994                 else
3995                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3996         } else {
3997                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3998                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
3999                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4000         }
4001 }
4002
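/* MTU change entry point: validate the requested size and, if the
 * device is running, stop traffic, halt the chip, apply the new MTU
 * and ring flags, then re-initialize the hardware.
 */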
4003 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4004 {
4005         struct tg3 *tp = netdev_priv(dev);
4006
4007         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4008                 return -EINVAL;
4009
4010         if (!netif_running(dev)) {
4011                 /* We'll just catch it later when the
4012                  * device is brought up.
4013                  */
4014                 tg3_set_mtu(dev, tp, new_mtu);
4015                 return 0;
4016         }
4017
4018         tg3_netif_stop(tp);
4019
4020         tg3_full_lock(tp, 1);
4021
4022         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4023
4024         tg3_set_mtu(dev, tp, new_mtu);
4025
4026         tg3_init_hw(tp);
4027
4028         tg3_netif_start(tp);
4029
4030         tg3_full_unlock(tp);
4031
4032         return 0;
4033 }
4034
4035 /* Free up pending packets in all rx/tx rings.
4036  *
4037  * The chip has been shut down and the driver detached from
4038  * the networking stack, so no interrupts or new tx packets will
4039  * end up in the driver.  tp->{tx,}lock is not held and we are not
4040  * in an interrupt context and thus may sleep.
4041  */
4042 static void tg3_free_rings(struct tg3 *tp)
4043 {
4044         struct ring_info *rxp;
4045         int i;
4046
4047         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4048                 rxp = &tp->rx_std_buffers[i];
4049
4050                 if (rxp->skb == NULL)
4051                         continue;
4052                 pci_unmap_single(tp->pdev,
4053                                  pci_unmap_addr(rxp, mapping),
4054                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4055                                  PCI_DMA_FROMDEVICE);
4056                 dev_kfree_skb_any(rxp->skb);
4057                 rxp->skb = NULL;
4058         }
4059
4060         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4061                 rxp = &tp->rx_jumbo_buffers[i];
4062
4063                 if (rxp->skb == NULL)
4064                         continue;
4065                 pci_unmap_single(tp->pdev,
4066                                  pci_unmap_addr(rxp, mapping),
4067                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4068                                  PCI_DMA_FROMDEVICE);
4069                 dev_kfree_skb_any(rxp->skb);
4070                 rxp->skb = NULL;
4071         }
4072
4073         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4074                 struct tx_ring_info *txp;
4075                 struct sk_buff *skb;
4076                 int j;
4077
4078                 txp = &tp->tx_buffers[i];
4079                 skb = txp->skb;
4080
4081                 if (skb == NULL) {
4082                         i++;
4083                         continue;
4084                 }
4085
4086                 pci_unmap_single(tp->pdev,
4087                                  pci_unmap_addr(txp, mapping),
4088                                  skb_headlen(skb),
4089                                  PCI_DMA_TODEVICE);
4090                 txp->skb = NULL;
4091
4092                 i++;
4093
4094                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4095                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4096                         pci_unmap_page(tp->pdev,
4097                                        pci_unmap_addr(txp, mapping),
4098                                        skb_shinfo(skb)->frags[j].size,
4099                                        PCI_DMA_TODEVICE);
4100                         i++;
4101                 }
4102
4103                 dev_kfree_skb_any(skb);
4104         }
4105 }
4106
4107 /* Initialize tx/rx rings for packet processing.
4108  *
4109  * The chip has been shut down and the driver detached from
4110  * the networking stack, so no interrupts or new tx packets will
4111  * end up in the driver.  tp->{tx,}lock are held and thus
4112  * we may not sleep.
4113  */
4114 static void tg3_init_rings(struct tg3 *tp)
4115 {
4116         u32 i;
4117
4118         /* Free up all the SKBs. */
4119         tg3_free_rings(tp);
4120
4121         /* Zero out all descriptors. */
4122         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4123         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4124         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4125         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4126
4127         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4128         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4129             (tp->dev->mtu > ETH_DATA_LEN))
4130                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4131
4132         /* Initialize invariants of the rings; we only set this
4133          * stuff once.  This works because the card does not
4134          * write into the rx buffer posting rings.
4135          */
4136         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4137                 struct tg3_rx_buffer_desc *rxd;
4138
4139                 rxd = &tp->rx_std[i];
4140                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4141                         << RXD_LEN_SHIFT;
4142                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4143                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4144                                (i << RXD_OPAQUE_INDEX_SHIFT));
4145         }
4146
4147         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4148                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4149                         struct tg3_rx_buffer_desc *rxd;
4150
4151                         rxd = &tp->rx_jumbo[i];
4152                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4153                                 << RXD_LEN_SHIFT;
4154                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4155                                 RXD_FLAG_JUMBO;
4156                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4157                                (i << RXD_OPAQUE_INDEX_SHIFT));
4158                 }
4159         }
4160
4161         /* Now allocate fresh SKBs for each rx ring. */
4162         for (i = 0; i < tp->rx_pending; i++) {
4163                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
4164                                      -1, i) < 0)
4165                         break;
4166         }
4167
4168         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4169                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4170                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4171                                              -1, i) < 0)
4172                                 break;
4173                 }
4174         }
4175 }
4176
4177 /*
4178  * Must not be invoked with interrupt sources disabled and
4179  * the hardware shut down.
4180  */
4181 static void tg3_free_consistent(struct tg3 *tp)
4182 {
4183         kfree(tp->rx_std_buffers);
4184         tp->rx_std_buffers = NULL;
4185         if (tp->rx_std) {
4186                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4187                                     tp->rx_std, tp->rx_std_mapping);
4188                 tp->rx_std = NULL;
4189         }
4190         if (tp->rx_jumbo) {
4191                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4192                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4193                 tp->rx_jumbo = NULL;
4194         }
4195         if (tp->rx_rcb) {
4196                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4197                                     tp->rx_rcb, tp->rx_rcb_mapping);
4198                 tp->rx_rcb = NULL;
4199         }
4200         if (tp->tx_ring) {
4201                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4202                         tp->tx_ring, tp->tx_desc_mapping);
4203                 tp->tx_ring = NULL;
4204         }
4205         if (tp->hw_status) {
4206                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4207                                     tp->hw_status, tp->status_mapping);
4208                 tp->hw_status = NULL;
4209         }
4210         if (tp->hw_stats) {
4211                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4212                                     tp->hw_stats, tp->stats_mapping);
4213                 tp->hw_stats = NULL;
4214         }
4215 }
4216
4217 /*
4218  * Must not be invoked with interrupt sources disabled and
4219  * the hardware shut down.  Can sleep.
4220  */
4221 static int tg3_alloc_consistent(struct tg3 *tp)
4222 {
4223         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4224                                       (TG3_RX_RING_SIZE +
4225                                        TG3_RX_JUMBO_RING_SIZE)) +
4226                                      (sizeof(struct tx_ring_info) *
4227                                       TG3_TX_RING_SIZE),
4228                                      GFP_KERNEL);
4229         if (!tp->rx_std_buffers)
4230                 return -ENOMEM;
4231
4232         memset(tp->rx_std_buffers, 0,
4233                (sizeof(struct ring_info) *
4234                 (TG3_RX_RING_SIZE +
4235                  TG3_RX_JUMBO_RING_SIZE)) +
4236                (sizeof(struct tx_ring_info) *
4237                 TG3_TX_RING_SIZE));
4238
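        /* The jumbo rx and tx bookkeeping arrays are carved out of the
         * tail of the single rx_std_buffers allocation made above.
         */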
4239         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4240         tp->tx_buffers = (struct tx_ring_info *)
4241                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4242
4243         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4244                                           &tp->rx_std_mapping);
4245         if (!tp->rx_std)
4246                 goto err_out;
4247
4248         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4249                                             &tp->rx_jumbo_mapping);
4250
4251         if (!tp->rx_jumbo)
4252                 goto err_out;
4253
4254         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4255                                           &tp->rx_rcb_mapping);
4256         if (!tp->rx_rcb)
4257                 goto err_out;
4258
4259         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4260                                            &tp->tx_desc_mapping);
4261         if (!tp->tx_ring)
4262                 goto err_out;
4263
4264         tp->hw_status = pci_alloc_consistent(tp->pdev,
4265                                              TG3_HW_STATUS_SIZE,
4266                                              &tp->status_mapping);
4267         if (!tp->hw_status)
4268                 goto err_out;
4269
4270         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4271                                             sizeof(struct tg3_hw_stats),
4272                                             &tp->stats_mapping);
4273         if (!tp->hw_stats)
4274                 goto err_out;
4275
4276         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4277         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4278
4279         return 0;
4280
4281 err_out:
4282         tg3_free_consistent(tp);
4283         return -ENOMEM;
4284 }
4285
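/* The polling loops below wait up to MAX_WAIT_CNT * 100us, i.e. roughly 100ms. */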
4286 #define MAX_WAIT_CNT 1000
4287
4288 /* To stop a block, clear the enable bit and poll till it
4289  * clears.  tp->lock is held.
4290  */
4291 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4292 {
4293         unsigned int i;
4294         u32 val;
4295
4296         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4297                 switch (ofs) {
4298                 case RCVLSC_MODE:
4299                 case DMAC_MODE:
4300                 case MBFREE_MODE:
4301                 case BUFMGR_MODE:
4302                 case MEMARB_MODE:
4303                         /* We can't enable/disable these bits of the
4304                          * 5705/5750; just say success.
4305                          */
4306                         return 0;
4307
4308                 default:
4309                         break;
4310                 }
4311         }
4312
4313         val = tr32(ofs);
4314         val &= ~enable_bit;
4315         tw32_f(ofs, val);
4316
4317         for (i = 0; i < MAX_WAIT_CNT; i++) {
4318                 udelay(100);
4319                 val = tr32(ofs);
4320                 if ((val & enable_bit) == 0)
4321                         break;
4322         }
4323
4324         if (i == MAX_WAIT_CNT && !silent) {
4325                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4326                        "ofs=%lx enable_bit=%x\n",
4327                        ofs, enable_bit);
4328                 return -ENODEV;
4329         }
4330
4331         return 0;
4332 }
4333
4334 /* tp->lock is held. */
4335 static int tg3_abort_hw(struct tg3 *tp, int silent)
4336 {
4337         int i, err;
4338
4339         tg3_disable_ints(tp);
4340
4341         tp->rx_mode &= ~RX_MODE_ENABLE;
4342         tw32_f(MAC_RX_MODE, tp->rx_mode);
4343         udelay(10);
4344
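        /* Quiesce the receive blocks first, then the send blocks; the host
         * coalescing, buffer manager and memory arbiter blocks are stopped
         * further below.
         */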
4345         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4346         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4347         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4348         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4349         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4350         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4351
4352         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4353         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4354         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4355         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4356         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4357         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4358         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4359
4360         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4361         tw32_f(MAC_MODE, tp->mac_mode);
4362         udelay(40);
4363
4364         tp->tx_mode &= ~TX_MODE_ENABLE;
4365         tw32_f(MAC_TX_MODE, tp->tx_mode);
4366
4367         for (i = 0; i < MAX_WAIT_CNT; i++) {
4368                 udelay(100);
4369                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4370                         break;
4371         }
4372         if (i >= MAX_WAIT_CNT) {
4373                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4374                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4375                        tp->dev->name, tr32(MAC_TX_MODE));
4376                 err |= -ENODEV;
4377         }
4378
4379         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4380         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4381         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4382
4383         tw32(FTQ_RESET, 0xffffffff);
4384         tw32(FTQ_RESET, 0x00000000);
4385
4386         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4387         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4388
4389         if (tp->hw_status)
4390                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4391         if (tp->hw_stats)
4392                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4393
4394         return err;
4395 }
4396
4397 /* tp->lock is held. */
4398 static int tg3_nvram_lock(struct tg3 *tp)
4399 {
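        /* Arbitrate for NVRAM via software arbitration request 1; the
         * nvram_lock_cnt lets nested lock/unlock calls share one grant.
         */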
4400         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4401                 int i;
4402
4403                 if (tp->nvram_lock_cnt == 0) {
4404                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4405                         for (i = 0; i < 8000; i++) {
4406                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4407                                         break;
4408                                 udelay(20);
4409                         }
4410                         if (i == 8000) {
4411                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4412                                 return -ENODEV;
4413                         }
4414                 }
4415                 tp->nvram_lock_cnt++;
4416         }
4417         return 0;
4418 }
4419
4420 /* tp->lock is held. */
4421 static void tg3_nvram_unlock(struct tg3 *tp)
4422 {
4423         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4424                 if (tp->nvram_lock_cnt > 0)
4425                         tp->nvram_lock_cnt--;
4426                 if (tp->nvram_lock_cnt == 0)
4427                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4428         }
4429 }
4430
4431 /* tp->lock is held. */
4432 static void tg3_enable_nvram_access(struct tg3 *tp)
4433 {
4434         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4435             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4436                 u32 nvaccess = tr32(NVRAM_ACCESS);
4437
4438                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4439         }
4440 }
4441
4442 /* tp->lock is held. */
4443 static void tg3_disable_nvram_access(struct tg3 *tp)
4444 {
4445         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4446             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4447                 u32 nvaccess = tr32(NVRAM_ACCESS);
4448
4449                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4450         }
4451 }
4452
4453 /* tp->lock is held. */
4454 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4455 {
4456         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4457                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4458                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4459
4460         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4461                 switch (kind) {
4462                 case RESET_KIND_INIT:
4463                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4464                                       DRV_STATE_START);
4465                         break;
4466
4467                 case RESET_KIND_SHUTDOWN:
4468                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4469                                       DRV_STATE_UNLOAD);
4470                         break;
4471
4472                 case RESET_KIND_SUSPEND:
4473                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4474                                       DRV_STATE_SUSPEND);
4475                         break;
4476
4477                 default:
4478                         break;
4479                 }
4480         }
4481 }
4482
4483 /* tp->lock is held. */
4484 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4485 {
4486         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4487                 switch (kind) {
4488                 case RESET_KIND_INIT:
4489                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4490                                       DRV_STATE_START_DONE);
4491                         break;
4492
4493                 case RESET_KIND_SHUTDOWN:
4494                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4495                                       DRV_STATE_UNLOAD_DONE);
4496                         break;
4497
4498                 default:
4499                         break;
4500                 }
4501         }
4502 }
4503
4504 /* tp->lock is held. */
4505 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4506 {
4507         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4508                 switch (kind) {
4509                 case RESET_KIND_INIT:
4510                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4511                                       DRV_STATE_START);
4512                         break;
4513
4514                 case RESET_KIND_SHUTDOWN:
4515                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4516                                       DRV_STATE_UNLOAD);
4517                         break;
4518
4519                 case RESET_KIND_SUSPEND:
4520                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4521                                       DRV_STATE_SUSPEND);
4522                         break;
4523
4524                 default:
4525                         break;
4526                 }
4527         }
4528 }
4529
4530 static void tg3_stop_fw(struct tg3 *);
4531
4532 /* tp->lock is held. */
4533 static int tg3_chip_reset(struct tg3 *tp)
4534 {
4535         u32 val;
4536         void (*write_op)(struct tg3 *, u32, u32);
4537         int i;
4538
4539         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4540                 tg3_nvram_lock(tp);
4541                 /* No matching tg3_nvram_unlock() after this because
4542                  * the chip reset below will undo the nvram lock.
4543                  */
4544                 tp->nvram_lock_cnt = 0;
4545         }
4546
4547         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4548             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4549                 tw32(GRC_FASTBOOT_PC, 0);
4550
4551         /*
4552          * We must avoid the readl() that normally takes place.
4553          * It locks up machines, causes machine checks, and does other
4554          * fun things.  So, temporarily disable the 5701
4555          * hardware workaround while we do the reset.
4556          */
4557         write_op = tp->write32;
4558         if (write_op == tg3_write_flush_reg32)
4559                 tp->write32 = tg3_write32;
4560
4561         /* do the reset */
4562         val = GRC_MISC_CFG_CORECLK_RESET;
4563
4564         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4565                 if (tr32(0x7e2c) == 0x60) {
4566                         tw32(0x7e2c, 0x20);
4567                 }
4568                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4569                         tw32(GRC_MISC_CFG, (1 << 29));
4570                         val |= (1 << 29);
4571                 }
4572         }
4573
4574         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4575                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4576         tw32(GRC_MISC_CFG, val);
4577
4578         /* restore 5701 hardware bug workaround write method */
4579         tp->write32 = write_op;
4580
4581         /* Unfortunately, we have to delay before the PCI read back.
4582          * Some 575X chips will not even respond to a PCI cfg access
4583          * when the reset command is given to the chip.
4584          *
4585          * How do these hardware designers expect things to work
4586          * properly if the PCI write is posted for a long period
4587          * of time?  It is always necessary to have some method by
4588          * which a register read back can occur to push out the
4589          * write that does the reset.
4590          *
4591          * For most tg3 variants the trick below has worked.
4592          * Ho hum...
4593          */
4594         udelay(120);
4595
4596         /* Flush PCI posted writes.  The normal MMIO registers
4597          * are inaccessible at this time, so this is the only
4598          * way to do this reliably (actually, this is no longer
4599          * the case, see above).  I tried to use indirect
4600          * register read/write but this upset some 5701 variants.
4601          */
4602         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4603
4604         udelay(120);
4605
4606         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4607                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4608                         int i;
4609                         u32 cfg_val;
4610
4611                         /* Wait for link training to complete.  */
4612                         for (i = 0; i < 5000; i++)
4613                                 udelay(100);
4614
4615                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4616                         pci_write_config_dword(tp->pdev, 0xc4,
4617                                                cfg_val | (1 << 15));
4618                 }
4619                 /* Set PCIE max payload size and clear error status.  */
4620                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4621         }
4622
4623         /* Re-enable indirect register accesses. */
4624         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4625                                tp->misc_host_ctrl);
4626
4627         /* Set MAX PCI retry to zero. */
4628         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4629         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4630             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4631                 val |= PCISTATE_RETRY_SAME_DMA;
4632         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4633
4634         pci_restore_state(tp->pdev);
4635
4636         /* Make sure PCI-X relaxed ordering bit is clear. */
4637         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4638         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4639         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4640
4641         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4642                 u32 val;
4643
4644                 /* Chip reset on 5780 will reset MSI enable bit,
4645                  * so we need to restore it.
4646                  */
4647                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4648                         u16 ctrl;
4649
4650                         pci_read_config_word(tp->pdev,
4651                                              tp->msi_cap + PCI_MSI_FLAGS,
4652                                              &ctrl);
4653                         pci_write_config_word(tp->pdev,
4654                                               tp->msi_cap + PCI_MSI_FLAGS,
4655                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4656                         val = tr32(MSGINT_MODE);
4657                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4658                 }
4659
4660                 val = tr32(MEMARB_MODE);
4661                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4662
4663         } else
4664                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4665
4666         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4667                 tg3_stop_fw(tp);
4668                 tw32(0x5000, 0x400);
4669         }
4670
4671         tw32(GRC_MODE, tp->grc_mode);
4672
4673         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4674                 u32 val = tr32(0xc4);
4675
4676                 tw32(0xc4, val | (1 << 15));
4677         }
4678
4679         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4680             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4681                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4682                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4683                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4684                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4685         }
4686
4687         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4688                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4689                 tw32_f(MAC_MODE, tp->mac_mode);
4690         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4691                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4692                 tw32_f(MAC_MODE, tp->mac_mode);
4693         } else
4694                 tw32_f(MAC_MODE, 0);
4695         udelay(40);
4696
4697         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4698                 /* Wait for firmware initialization to complete. */
4699                 for (i = 0; i < 100000; i++) {
4700                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4701                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4702                                 break;
4703                         udelay(10);
4704                 }
4705                 if (i >= 100000) {
4706                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4707                                "firmware will not restart magic=%08x\n",
4708                                tp->dev->name, val);
4709                         return -ENODEV;
4710                 }
4711         }
4712
4713         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4714             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4715                 u32 val = tr32(0x7c00);
4716
4717                 tw32(0x7c00, val | (1 << 25));
4718         }
4719
4720         /* Reprobe ASF enable state.  */
4721         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4722         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4723         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4724         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4725                 u32 nic_cfg;
4726
4727                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4728                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4729                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4730                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4731                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4732                 }
4733         }
4734
4735         return 0;
4736 }
4737
4738 /* tp->lock is held. */
4739 static void tg3_stop_fw(struct tg3 *tp)
4740 {
4741         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4742                 u32 val;
4743                 int i;
4744
4745                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4746                 val = tr32(GRC_RX_CPU_EVENT);
4747                 val |= (1 << 14);
4748                 tw32(GRC_RX_CPU_EVENT, val);
4749
4750                 /* Wait for RX cpu to ACK the event.  */
4751                 for (i = 0; i < 100; i++) {
4752                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4753                                 break;
4754                         udelay(1);
4755                 }
4756         }
4757 }
4758
4759 /* tp->lock is held. */
4760 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4761 {
4762         int err;
4763
4764         tg3_stop_fw(tp);
4765
4766         tg3_write_sig_pre_reset(tp, kind);
4767
4768         tg3_abort_hw(tp, silent);
4769         err = tg3_chip_reset(tp);
4770
4771         tg3_write_sig_legacy(tp, kind);
4772         tg3_write_sig_post_reset(tp, kind);
4773
4774         if (err)
4775                 return err;
4776
4777         return 0;
4778 }
4779
4780 #define TG3_FW_RELEASE_MAJOR    0x0
4781 #define TG3_FW_RELASE_MINOR     0x0
4782 #define TG3_FW_RELEASE_FIX      0x0
4783 #define TG3_FW_START_ADDR       0x08000000
4784 #define TG3_FW_TEXT_ADDR        0x08000000
4785 #define TG3_FW_TEXT_LEN         0x9c0
4786 #define TG3_FW_RODATA_ADDR      0x080009c0
4787 #define TG3_FW_RODATA_LEN       0x60
4788 #define TG3_FW_DATA_ADDR        0x08000a40
4789 #define TG3_FW_DATA_LEN         0x20
4790 #define TG3_FW_SBSS_ADDR        0x08000a60
4791 #define TG3_FW_SBSS_LEN         0xc
4792 #define TG3_FW_BSS_ADDR         0x08000a70
4793 #define TG3_FW_BSS_LEN          0x10
4794
4795 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4796         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4797         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4798         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4799         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4800         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4801         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4802         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4803         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4804         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4805         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4806         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4807         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4808         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4809         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4810         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4811         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4812         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4813         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4814         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4815         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4816         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4817         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4818         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4819         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4820         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4821         0, 0, 0, 0, 0, 0,
4822         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4823         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4824         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4825         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4826         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4827         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4828         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4829         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4830         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4831         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4832         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4833         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4834         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4835         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4836         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4837         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4838         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4839         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4840         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4841         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4842         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4843         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4844         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4845         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4846         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4847         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4848         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4849         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4850         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4851         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4852         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4853         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4854         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4855         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4856         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4857         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4858         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4859         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4860         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4861         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4862         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4863         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4864         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4865         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4866         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4867         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4868         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4869         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4870         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4871         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4872         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4873         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4874         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4875         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4876         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4877         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4878         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4879         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4880         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4881         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4882         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4883         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4884         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4885         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4886         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4887 };
4888
4889 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4890         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4891         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4892         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4893         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4894         0x00000000
4895 };
4896
4897 #if 0 /* All zeros, don't eat up space with it. */
4898 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4899         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4900         0x00000000, 0x00000000, 0x00000000, 0x00000000
4901 };
4902 #endif
4903
4904 #define RX_CPU_SCRATCH_BASE     0x30000
4905 #define RX_CPU_SCRATCH_SIZE     0x04000
4906 #define TX_CPU_SCRATCH_BASE     0x34000
4907 #define TX_CPU_SCRATCH_SIZE     0x04000
4908
4909 /* tp->lock is held. */
4910 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4911 {
4912         int i;
4913
4914         if (offset == TX_CPU_BASE &&
4915             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4916                 BUG();
4917
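        /* Repeatedly assert CPU_MODE_HALT until the CPU reports halted.
         * The RX CPU additionally gets a final flushed halt write and a
         * short settle delay below.
         */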
4918         if (offset == RX_CPU_BASE) {
4919                 for (i = 0; i < 10000; i++) {
4920                         tw32(offset + CPU_STATE, 0xffffffff);
4921                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4922                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4923                                 break;
4924                 }
4925
4926                 tw32(offset + CPU_STATE, 0xffffffff);
4927                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4928                 udelay(10);
4929         } else {
4930                 for (i = 0; i < 10000; i++) {
4931                         tw32(offset + CPU_STATE, 0xffffffff);
4932                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4933                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4934                                 break;
4935                 }
4936         }
4937
4938         if (i >= 10000) {
4939                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4940                        "and %s CPU\n",
4941                        tp->dev->name,
4942                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4943                 return -ENODEV;
4944         }
4945
4946         /* Clear firmware's nvram arbitration. */
4947         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4948                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
4949         return 0;
4950 }
4951
4952 struct fw_info {
4953         unsigned int text_base;
4954         unsigned int text_len;
4955         u32 *text_data;
4956         unsigned int rodata_base;
4957         unsigned int rodata_len;
4958         u32 *rodata_data;
4959         unsigned int data_base;
4960         unsigned int data_len;
4961         u32 *data_data;
4962 };
4963
4964 /* tp->lock is held. */
4965 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4966                                  int cpu_scratch_size, struct fw_info *info)
4967 {
4968         int err, lock_err, i;
4969         void (*write_op)(struct tg3 *, u32, u32);
4970
4971         if (cpu_base == TX_CPU_BASE &&
4972             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4973                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4974                        "TX cpu firmware on %s which is 5705.\n",
4975                        tp->dev->name);
4976                 return -EINVAL;
4977         }
4978
4979         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4980                 write_op = tg3_write_mem;
4981         else
4982                 write_op = tg3_write_indirect_reg32;
4983
4984         /* It is possible that bootcode is still loading at this point.
4985          * Get the nvram lock before halting the cpu.
4986          */
4987         lock_err = tg3_nvram_lock(tp);
4988         err = tg3_halt_cpu(tp, cpu_base);
4989         if (!lock_err)
4990                 tg3_nvram_unlock(tp);
4991         if (err)
4992                 goto out;
4993
4994         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4995                 write_op(tp, cpu_scratch_base + i, 0);
4996         tw32(cpu_base + CPU_STATE, 0xffffffff);
4997         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
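        /* Copy each firmware section into the CPU scratch memory.  A NULL
         * section pointer means the section is all zeros (e.g. tg3FwData),
         * so it is zero-filled instead.
         */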
4998         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4999                 write_op(tp, (cpu_scratch_base +
5000                               (info->text_base & 0xffff) +
5001                               (i * sizeof(u32))),
5002                          (info->text_data ?
5003                           info->text_data[i] : 0));
5004         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5005                 write_op(tp, (cpu_scratch_base +
5006                               (info->rodata_base & 0xffff) +
5007                               (i * sizeof(u32))),
5008                          (info->rodata_data ?
5009                           info->rodata_data[i] : 0));
5010         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5011                 write_op(tp, (cpu_scratch_base +
5012                               (info->data_base & 0xffff) +
5013                               (i * sizeof(u32))),
5014                          (info->data_data ?
5015                           info->data_data[i] : 0));
5016
5017         err = 0;
5018
5019 out:
5020         return err;
5021 }
5022
5023 /* tp->lock is held. */
5024 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5025 {
5026         struct fw_info info;
5027         int err, i;
5028
5029         info.text_base = TG3_FW_TEXT_ADDR;
5030         info.text_len = TG3_FW_TEXT_LEN;
5031         info.text_data = &tg3FwText[0];
5032         info.rodata_base = TG3_FW_RODATA_ADDR;
5033         info.rodata_len = TG3_FW_RODATA_LEN;
5034         info.rodata_data = &tg3FwRodata[0];
5035         info.data_base = TG3_FW_DATA_ADDR;
5036         info.data_len = TG3_FW_DATA_LEN;
5037         info.data_data = NULL;
5038
5039         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5040                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5041                                     &info);
5042         if (err)
5043                 return err;
5044
5045         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5046                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5047                                     &info);
5048         if (err)
5049                 return err;
5050
5051         /* Now startup only the RX cpu. */
5052         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5053         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5054
5055         for (i = 0; i < 5; i++) {
5056                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5057                         break;
5058                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5059                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5060                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5061                 udelay(1000);
5062         }
5063         if (i >= 5) {
5064                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5065                        "to set RX CPU PC, is %08x should be %08x\n",
5066                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5067                        TG3_FW_TEXT_ADDR);
5068                 return -ENODEV;
5069         }
5070         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5071         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5072
5073         return 0;
5074 }
5075
5076 #if TG3_TSO_SUPPORT != 0
5077
5078 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5079 #define TG3_TSO_FW_RELASE_MINOR         0x6
5080 #define TG3_TSO_FW_RELEASE_FIX          0x0
5081 #define TG3_TSO_FW_START_ADDR           0x08000000
5082 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5083 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5084 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5085 #define TG3_TSO_FW_RODATA_LEN           0x60
5086 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5087 #define TG3_TSO_FW_DATA_LEN             0x30
5088 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5089 #define TG3_TSO_FW_SBSS_LEN             0x2c
5090 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5091 #define TG3_TSO_FW_BSS_LEN              0x894
5092
5093 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5094         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5095         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5096         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5097         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5098         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5099         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5100         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5101         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5102         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5103         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5104         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5105         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5106         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5107         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5108         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5109         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5110         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5111         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5112         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5113         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5114         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5115         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5116         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5117         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5118         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5119         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5120         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5121         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5122         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5123         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5124         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5125         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5126         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5127         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5128         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5129         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5130         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5131         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5132         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5133         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5134         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5135         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5136         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5137         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5138         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5139         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5140         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5141         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5142         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5143         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5144         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5145         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5146         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5147         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5148         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5149         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5150         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5151         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5152         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5153         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5154         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5155         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5156         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5157         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5158         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5159         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5160         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5161         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5162         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5163         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5164         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5165         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5166         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5167         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5168         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5169         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5170         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5171         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5172         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5173         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5174         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5175         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5176         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5177         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5178         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5179         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5180         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5181         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5182         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5183         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5184         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5185         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5186         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5187         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5188         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5189         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5190         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5191         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5192         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5193         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5194         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5195         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5196         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5197         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5198         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5199         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5200         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5201         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5202         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5203         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5204         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5205         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5206         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5207         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5208         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5209         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5210         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5211         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5212         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5213         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5214         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5215         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5216         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5217         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5218         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5219         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5220         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5221         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5222         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5223         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5224         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5225         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5226         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5227         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5228         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5229         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5230         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5231         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5232         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5233         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5234         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5235         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5236         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5237         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5238         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5239         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5240         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5241         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5242         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5243         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5244         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5245         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5246         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5247         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5248         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5249         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5250         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5251         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5252         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5253         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5254         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5255         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5256         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5257         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5258         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5259         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5260         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5261         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5262         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5263         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5264         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5265         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5266         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5267         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5268         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5269         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5270         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5271         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5272         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5273         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5274         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5275         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5276         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5277         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5278         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5279         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5280         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5281         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5282         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5283         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5284         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5285         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5286         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5287         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5288         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5289         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5290         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5291         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5292         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5293         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5294         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5295         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5296         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5297         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5298         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5299         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5300         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5301         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5302         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5303         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5304         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5305         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5306         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5307         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5308         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5309         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5310         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5311         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5312         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5313         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5314         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5315         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5316         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5317         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5318         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5319         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5320         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5321         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5322         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5323         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5324         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5325         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5326         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5327         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5328         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5329         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5330         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5331         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5332         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5333         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5334         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5335         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5336         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5337         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5338         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5339         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5340         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5341         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5342         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5343         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5344         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5345         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5346         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5347         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5348         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5349         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5350         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5351         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5352         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5353         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5354         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5355         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5356         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5357         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5358         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5359         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5360         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5361         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5362         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5363         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5364         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5365         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5366         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5367         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5368         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5369         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5370         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5371         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5372         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5373         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5374         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5375         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5376         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5377         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5378 };
5379
5380 static u32 tg3TsoFwRodata[] = {
5381         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5382         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5383         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5384         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5385         0x00000000,
5386 };
5387
5388 static u32 tg3TsoFwData[] = {
5389         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5390         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5391         0x00000000,
5392 };
5393
5394 /* 5705 needs a special version of the TSO firmware.  */
5395 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5396 #define TG3_TSO5_FW_RELASE_MINOR        0x2
5397 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5398 #define TG3_TSO5_FW_START_ADDR          0x00010000
5399 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5400 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5401 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5402 #define TG3_TSO5_FW_RODATA_LEN          0x50
5403 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5404 #define TG3_TSO5_FW_DATA_LEN            0x20
5405 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5406 #define TG3_TSO5_FW_SBSS_LEN            0x28
5407 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5408 #define TG3_TSO5_FW_BSS_LEN             0x88
5409
5410 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5411         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5412         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5413         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5414         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5415         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5416         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5417         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5418         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5419         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5420         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5421         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5422         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5423         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5424         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5425         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5426         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5427         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5428         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5429         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5430         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5431         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5432         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5433         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5434         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5435         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5436         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5437         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5438         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5439         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5440         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5441         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5442         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5443         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5444         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5445         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5446         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5447         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5448         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5449         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5450         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5451         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5452         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5453         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5454         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5455         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5456         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5457         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5458         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5459         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5460         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5461         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5462         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5463         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5464         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5465         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5466         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5467         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5468         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5469         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5470         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5471         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5472         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5473         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5474         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5475         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5476         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5477         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5478         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5479         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5480         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5481         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5482         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5483         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5484         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5485         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5486         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5487         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5488         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5489         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5490         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5491         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5492         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5493         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5494         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5495         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5496         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5497         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5498         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5499         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5500         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5501         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5502         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5503         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5504         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5505         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5506         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5507         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5508         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5509         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5510         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5511         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5512         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5513         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5514         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5515         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5516         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5517         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5518         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5519         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5520         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5521         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5522         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5523         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5524         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5525         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5526         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5527         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5528         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5529         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5530         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5531         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5532         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5533         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5534         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5535         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5536         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5537         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5538         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5539         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5540         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5541         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5542         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5543         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5544         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5545         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5546         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5547         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5548         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5549         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5550         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5551         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5552         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5553         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5554         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5555         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5556         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5557         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5558         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5559         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5560         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5561         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5562         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5563         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5564         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5565         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5566         0x00000000, 0x00000000, 0x00000000,
5567 };
5568
5569 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5570         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5571         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5572         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5573         0x00000000, 0x00000000, 0x00000000,
5574 };
5575
5576 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5577         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5578         0x00000000, 0x00000000, 0x00000000,
5579 };
5580
5581 /* tp->lock is held. */
5582 static int tg3_load_tso_firmware(struct tg3 *tp)
5583 {
5584         struct fw_info info;
5585         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5586         int err, i;
5587
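             /* Chips with hardware TSO do not need a firmware download. */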
5588         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5589                 return 0;
5590
5591         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5592                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5593                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5594                 info.text_data = &tg3Tso5FwText[0];
5595                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5596                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5597                 info.rodata_data = &tg3Tso5FwRodata[0];
5598                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5599                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5600                 info.data_data = &tg3Tso5FwData[0];
5601                 cpu_base = RX_CPU_BASE;
5602                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5603                 cpu_scratch_size = (info.text_len +
5604                                     info.rodata_len +
5605                                     info.data_len +
5606                                     TG3_TSO5_FW_SBSS_LEN +
5607                                     TG3_TSO5_FW_BSS_LEN);
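                     /* On the 5705 the firmware runs on the RX CPU and its
                      * scratch area is carved out of the MBUF pool;
                      * tg3_reset_hw shrinks the pool by the same (rounded up)
                      * footprint.
                      */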
5608         } else {
5609                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5610                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5611                 info.text_data = &tg3TsoFwText[0];
5612                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5613                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5614                 info.rodata_data = &tg3TsoFwRodata[0];
5615                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5616                 info.data_len = TG3_TSO_FW_DATA_LEN;
5617                 info.data_data = &tg3TsoFwData[0];
5618                 cpu_base = TX_CPU_BASE;
5619                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5620                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5621         }
5622
5623         err = tg3_load_firmware_cpu(tp, cpu_base,
5624                                     cpu_scratch_base, cpu_scratch_size,
5625                                     &info);
5626         if (err)
5627                 return err;
5628
5629         /* Now start up the CPU. */
5630         tw32(cpu_base + CPU_STATE, 0xffffffff);
5631         tw32_f(cpu_base + CPU_PC,    info.text_base);
5632
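             /* Poll the CPU PC to confirm it latched the firmware entry
              * point, retrying the write up to five times.
              */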
5633         for (i = 0; i < 5; i++) {
5634                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5635                         break;
5636                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5637                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5638                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5639                 udelay(1000);
5640         }
5641         if (i >= 5) {
5642                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed for %s "
5643                        "to set CPU PC: is %08x, should be %08x\n",
5644                        tp->dev->name, tr32(cpu_base + CPU_PC),
5645                        info.text_base);
5646                 return -ENODEV;
5647         }
5648         tw32(cpu_base + CPU_STATE, 0xffffffff);
5649         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5650         return 0;
5651 }
5652
5653 #endif /* TG3_TSO_SUPPORT != 0 */
5654
5655 /* tp->lock is held. */
5656 static void __tg3_set_mac_addr(struct tg3 *tp)
5657 {
5658         u32 addr_high, addr_low;
5659         int i;
5660
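             /* Each MAC address register pair holds the address big-endian:
              * the high word carries bytes 0-1, the low word bytes 2-5.
              */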
5661         addr_high = ((tp->dev->dev_addr[0] << 8) |
5662                      tp->dev->dev_addr[1]);
5663         addr_low = ((tp->dev->dev_addr[2] << 24) |
5664                     (tp->dev->dev_addr[3] << 16) |
5665                     (tp->dev->dev_addr[4] <<  8) |
5666                     (tp->dev->dev_addr[5] <<  0));
5667         for (i = 0; i < 4; i++) {
5668                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5669                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5670         }
5671
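             /* 5703 and 5704 have additional MAC address slots; program all
              * of them with the same station address.
              */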
5672         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5673             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5674                 for (i = 0; i < 12; i++) {
5675                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5676                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5677                 }
5678         }
5679
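             /* Derive the transmit backoff seed from the sum of the MAC
              * address bytes, masked to TX_BACKOFF_SEED_MASK.
              */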
5680         addr_high = (tp->dev->dev_addr[0] +
5681                      tp->dev->dev_addr[1] +
5682                      tp->dev->dev_addr[2] +
5683                      tp->dev->dev_addr[3] +
5684                      tp->dev->dev_addr[4] +
5685                      tp->dev->dev_addr[5]) &
5686                 TX_BACKOFF_SEED_MASK;
5687         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5688 }
5689
5690 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5691 {
5692         struct tg3 *tp = netdev_priv(dev);
5693         struct sockaddr *addr = p;
5694
5695         if (!is_valid_ether_addr(addr->sa_data))
5696                 return -EINVAL;
5697
5698         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5699
5700         if (!netif_running(dev))
5701                 return 0;
5702
5703         spin_lock_bh(&tp->lock);
5704         __tg3_set_mac_addr(tp);
5705         spin_unlock_bh(&tp->lock);
5706
5707         return 0;
5708 }
5709
5710 /* tp->lock is held. */
5711 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5712                            dma_addr_t mapping, u32 maxlen_flags,
5713                            u32 nic_addr)
5714 {
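             /* A TG3_BDINFO block in NIC SRAM describes one ring: the 64-bit
              * host DMA address (high/low words), a maxlen/flags word and, on
              * pre-5705 chips, the NIC-local address of the descriptors.
              */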
5715         tg3_write_mem(tp,
5716                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5717                       ((u64) mapping >> 32));
5718         tg3_write_mem(tp,
5719                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5720                       ((u64) mapping & 0xffffffff));
5721         tg3_write_mem(tp,
5722                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5723                        maxlen_flags);
5724
5725         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5726                 tg3_write_mem(tp,
5727                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5728                               nic_addr);
5729 }
5730
5731 static void __tg3_set_rx_mode(struct net_device *);
5732 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5733 {
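             /* Program the host coalescing tick and max-frame thresholds.
              * The per-interrupt tick registers and the statistics tick only
              * exist on pre-5705 chips.
              */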
5734         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5735         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5736         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5737         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5738         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5739                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5740                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5741         }
5742         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5743         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5744         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5745                 u32 val = ec->stats_block_coalesce_usecs;
5746
5747                 if (!netif_carrier_ok(tp->dev))
5748                         val = 0;
5749
5750                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5751         }
5752 }
5753
5754 /* tp->lock is held. */
5755 static int tg3_reset_hw(struct tg3 *tp)
5756 {
5757         u32 val, rdmac_mode;
5758         int i, err, limit;
5759
5760         tg3_disable_ints(tp);
5761
5762         tg3_stop_fw(tp);
5763
5764         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5765
5766         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5767                 tg3_abort_hw(tp, 1);
5768         }
5769
5770         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
5771                 tg3_phy_reset(tp);
5772
5773         err = tg3_chip_reset(tp);
5774         if (err)
5775                 return err;
5776
5777         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5778
5779         /* This works around an issue with Athlon chipsets on
5780          * B3 tigon3 silicon.  This bit has no effect on any
5781          * other revision.  But do not set this on PCI Express
5782          * chips.
5783          */
5784         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5785                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5786         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5787
5788         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5789             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5790                 val = tr32(TG3PCI_PCISTATE);
5791                 val |= PCISTATE_RETRY_SAME_DMA;
5792                 tw32(TG3PCI_PCISTATE, val);
5793         }
5794
5795         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5796                 /* Enable some hw fixes.  */
5797                 val = tr32(TG3PCI_MSI_DATA);
5798                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5799                 tw32(TG3PCI_MSI_DATA, val);
5800         }
5801
5802         /* Descriptor ring init may make accesses to the
5803          * NIC SRAM area to setup the TX descriptors, so we
5804          * can only do this after the hardware has been
5805          * successfully reset.
5806          */
5807         tg3_init_rings(tp);
5808
5809         /* This value is determined during the probe time DMA
5810          * engine test, tg3_test_dma.
5811          */
5812         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5813
5814         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5815                           GRC_MODE_4X_NIC_SEND_RINGS |
5816                           GRC_MODE_NO_TX_PHDR_CSUM |
5817                           GRC_MODE_NO_RX_PHDR_CSUM);
5818         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5819         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5820                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5821         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5822                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5823
5824         tw32(GRC_MODE,
5825              tp->grc_mode |
5826              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5827
5828         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
5829         val = tr32(GRC_MISC_CFG);
5830         val &= ~0xff;
5831         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
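             /* A prescaler of 65 presumably divides the 66 MHz clock by 66
              * (N + 1), giving roughly a 1 usec timer tick.
              */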
5832         tw32(GRC_MISC_CFG, val);
5833
5834         /* Initialize MBUF/DESC pool. */
5835         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5836                 /* Do nothing.  */
5837         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5838                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5839                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5840                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5841                 else
5842                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5843                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5844                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5845         }
5846 #if TG3_TSO_SUPPORT != 0
5847         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5848                 int fw_len;
5849
5850                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5851                           TG3_TSO5_FW_RODATA_LEN +
5852                           TG3_TSO5_FW_DATA_LEN +
5853                           TG3_TSO5_FW_SBSS_LEN +
5854                           TG3_TSO5_FW_BSS_LEN);
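                     /* Round the firmware footprint up to a 128-byte boundary
                      * before carving it out of the front of the MBUF pool.
                      */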
5855                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5856                 tw32(BUFMGR_MB_POOL_ADDR,
5857                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5858                 tw32(BUFMGR_MB_POOL_SIZE,
5859                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5860         }
5861 #endif
5862
5863         if (tp->dev->mtu <= ETH_DATA_LEN) {
5864                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5865                      tp->bufmgr_config.mbuf_read_dma_low_water);
5866                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5867                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5868                 tw32(BUFMGR_MB_HIGH_WATER,
5869                      tp->bufmgr_config.mbuf_high_water);
5870         } else {
5871                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5872                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5873                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5874                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5875                 tw32(BUFMGR_MB_HIGH_WATER,
5876                      tp->bufmgr_config.mbuf_high_water_jumbo);
5877         }
5878         tw32(BUFMGR_DMA_LOW_WATER,
5879              tp->bufmgr_config.dma_low_water);
5880         tw32(BUFMGR_DMA_HIGH_WATER,
5881              tp->bufmgr_config.dma_high_water);
5882
5883         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5884         for (i = 0; i < 2000; i++) {
5885                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5886                         break;
5887                 udelay(10);
5888         }
5889         if (i >= 2000) {
5890                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5891                        tp->dev->name);
5892                 return -ENODEV;
5893         }
5894
5895         /* Set up the replenish threshold. */
5896         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5897
5898         /* Initialize TG3_BDINFO's at:
5899          *  RCVDBDI_STD_BD:     standard eth size rx ring
5900          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5901          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5902          *
5903          * like so:
5904          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5905          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5906          *                              ring attribute flags
5907          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5908          *
5909          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5910          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5911          *
5912          * The size of each ring is fixed in the firmware, but the location is
5913          * configurable.
5914          */
5915         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5916              ((u64) tp->rx_std_mapping >> 32));
5917         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5918              ((u64) tp->rx_std_mapping & 0xffffffff));
5919         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5920              NIC_SRAM_RX_BUFFER_DESC);
5921
5922         /* Don't even try to program the JUMBO/MINI buffer descriptor
5923          * configs on 5705.
5924          */
5925         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5926                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5927                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5928         } else {
5929                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5930                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5931
5932                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5933                      BDINFO_FLAGS_DISABLED);
5934
5935                 /* Set up the replenish threshold. */
5936                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5937
5938                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5939                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5940                              ((u64) tp->rx_jumbo_mapping >> 32));
5941                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5942                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5943                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5944                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5945                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5946                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5947                 } else {
5948                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5949                              BDINFO_FLAGS_DISABLED);
5950                 }
5951
5952         }
5953
5954         /* There is only one send ring on 5705/5750, no need to explicitly
5955          * disable the others.
5956          */
5957         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5958                 /* Clear out send RCB ring in SRAM. */
5959                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5960                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5961                                       BDINFO_FLAGS_DISABLED);
5962         }
5963
5964         tp->tx_prod = 0;
5965         tp->tx_cons = 0;
5966         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5967         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5968
5969         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5970                        tp->tx_desc_mapping,
5971                        (TG3_TX_RING_SIZE <<
5972                         BDINFO_FLAGS_MAXLEN_SHIFT),
5973                        NIC_SRAM_TX_BUFFER_DESC);
5974
5975         /* There is only one receive return ring on 5705/5750, no need
5976          * to explicitly disable the others.
5977          */
5978         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5979                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5980                      i += TG3_BDINFO_SIZE) {
5981                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5982                                       BDINFO_FLAGS_DISABLED);
5983                 }
5984         }
5985
5986         tp->rx_rcb_ptr = 0;
5987         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5988
5989         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5990                        tp->rx_rcb_mapping,
5991                        (TG3_RX_RCB_RING_SIZE(tp) <<
5992                         BDINFO_FLAGS_MAXLEN_SHIFT),
5993                        0);
5994
5995         tp->rx_std_ptr = tp->rx_pending;
5996         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5997                      tp->rx_std_ptr);
5998
5999         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6000                                                 tp->rx_jumbo_pending : 0;
6001         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6002                      tp->rx_jumbo_ptr);
6003
6004         /* Initialize MAC address and backoff seed. */
6005         __tg3_set_mac_addr(tp);
6006
6007         /* MTU + ethernet header + FCS + optional VLAN tag */
6008         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
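             /* The extra 8 bytes cover the 4-byte FCS plus a 4-byte VLAN
              * tag; ETH_HLEN accounts for the 14-byte Ethernet header.
              */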
6009
6010         /* The slot time is changed by tg3_setup_phy if we
6011          * run at gigabit with half duplex.
6012          */
6013         tw32(MAC_TX_LENGTHS,
6014              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6015              (6 << TX_LENGTHS_IPG_SHIFT) |
6016              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6017
6018         /* Receive rules. */
6019         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6020         tw32(RCVLPC_CONFIG, 0x0181);
6021
6022         /* Calculate RDMAC_MODE setting early, we need it to determine
6023          * the RCVLPC_STATE_ENABLE mask.
6024          */
6025         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6026                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6027                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6028                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6029                       RDMAC_MODE_LNGREAD_ENAB);
6030         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6031                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6032
6033         /* If statement applies to 5705 and 5750 PCI devices only */
6034         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6035              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6036             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6037                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6038                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6039                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6040                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6041                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6042                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6043                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6044                 }
6045         }
6046
6047         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6048                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6049
6050 #if TG3_TSO_SUPPORT != 0
6051         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6052                 rdmac_mode |= (1 << 27);
6053 #endif
6054
6055         /* Receive/send statistics. */
6056         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6057             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6058                 val = tr32(RCVLPC_STATS_ENABLE);
6059                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6060                 tw32(RCVLPC_STATS_ENABLE, val);
6061         } else {
6062                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6063         }
6064         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6065         tw32(SNDDATAI_STATSENAB, 0xffffff);
6066         tw32(SNDDATAI_STATSCTRL,
6067              (SNDDATAI_SCTRL_ENABLE |
6068               SNDDATAI_SCTRL_FASTUPD));
6069
6070         /* Setup host coalescing engine. */
6071         tw32(HOSTCC_MODE, 0);
6072         for (i = 0; i < 2000; i++) {
6073                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6074                         break;
6075                 udelay(10);
6076         }
6077
6078         __tg3_set_coalesce(tp, &tp->coal);
6079
6080         /* set status block DMA address */
6081         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6082              ((u64) tp->status_mapping >> 32));
6083         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6084              ((u64) tp->status_mapping & 0xffffffff));
6085
6086         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6087                 /* Status/statistics block address.  See tg3_timer,
6088                  * the tg3_periodic_fetch_stats call there, and
6089                  * tg3_get_stats to see how this works for 5705/5750 chips.
6090                  */
6091                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6092                      ((u64) tp->stats_mapping >> 32));
6093                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6094                      ((u64) tp->stats_mapping & 0xffffffff));
6095                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6096                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6097         }
6098
6099         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6100
6101         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6102         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6103         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6104                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6105
6106         /* Clear statistics/status block in chip, and status block in ram. */
6107         for (i = NIC_SRAM_STATS_BLK;
6108              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6109              i += sizeof(u32)) {
6110                 tg3_write_mem(tp, i, 0);
6111                 udelay(40);
6112         }
6113         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6114
6115         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6116                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6117                 /* reset to prevent losing 1st rx packet intermittently */
6118                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6119                 udelay(10);
6120         }
6121
6122         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6123                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6124         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6125         udelay(40);
6126
6127         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6128          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6129          * register to preserve the GPIO settings for LOMs. The GPIOs,
6130          * whether used as inputs or outputs, are set by boot code after
6131          * reset.
6132          */
6133         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6134                 u32 gpio_mask;
6135
6136                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6137                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6138
6139                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6140                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6141                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6142
6143                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6144
6145                 /* GPIO1 must be driven high for eeprom write protect */
6146                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6147                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6148         }
6149         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6150         udelay(100);
6151
6152         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6153         tp->last_tag = 0;
6154
6155         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6156                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6157                 udelay(40);
6158         }
6159
6160         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6161                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6162                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6163                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6164                WDMAC_MODE_LNGREAD_ENAB);
6165
6166         /* If statement applies to 5705 and 5750 PCI devices only */
6167         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6168              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6169             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6170                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6171                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6172                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6173                         /* nothing */
6174                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6175                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6176                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6177                         val |= WDMAC_MODE_RX_ACCEL;
6178                 }
6179         }
6180
6181         /* Enable host coalescing bug fix */
6182         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
6183                 val |= (1 << 29);
6184
6185         tw32_f(WDMAC_MODE, val);
6186         udelay(40);
6187
6188         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6189                 val = tr32(TG3PCI_X_CAPS);
6190                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6191                         val &= ~PCIX_CAPS_BURST_MASK;
6192                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6193                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6194                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6195                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6196                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6197                                 val |= (tp->split_mode_max_reqs <<
6198                                         PCIX_CAPS_SPLIT_SHIFT);
6199                 }
6200                 tw32(TG3PCI_X_CAPS, val);
6201         }
6202
6203         tw32_f(RDMAC_MODE, rdmac_mode);
6204         udelay(40);
6205
6206         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6207         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6208                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6209         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6210         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6211         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6212         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6213         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6214 #if TG3_TSO_SUPPORT != 0
6215         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6216                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6217 #endif
6218         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6219         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6220
6221         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6222                 err = tg3_load_5701_a0_firmware_fix(tp);
6223                 if (err)
6224                         return err;
6225         }
6226
6227 #if TG3_TSO_SUPPORT != 0
6228         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6229                 err = tg3_load_tso_firmware(tp);
6230                 if (err)
6231                         return err;
6232         }
6233 #endif
6234
6235         tp->tx_mode = TX_MODE_ENABLE;
6236         tw32_f(MAC_TX_MODE, tp->tx_mode);
6237         udelay(100);
6238
6239         tp->rx_mode = RX_MODE_ENABLE;
6240         tw32_f(MAC_RX_MODE, tp->rx_mode);
6241         udelay(10);
6242
6243         if (tp->link_config.phy_is_low_power) {
6244                 tp->link_config.phy_is_low_power = 0;
6245                 tp->link_config.speed = tp->link_config.orig_speed;
6246                 tp->link_config.duplex = tp->link_config.orig_duplex;
6247                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6248         }
6249
6250         tp->mi_mode = MAC_MI_MODE_BASE;
6251         tw32_f(MAC_MI_MODE, tp->mi_mode);
6252         udelay(80);
6253
6254         tw32(MAC_LED_CTRL, tp->led_ctrl);
6255
6256         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6257         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6258                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6259                 udelay(10);
6260         }
6261         tw32_f(MAC_RX_MODE, tp->rx_mode);
6262         udelay(10);
6263
6264         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6265                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6266                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6267                         /* Set drive transmission level to 1.2V, but only
6268                          * if the signal pre-emphasis bit is not set.  */
6269                         val = tr32(MAC_SERDES_CFG);
6270                         val &= 0xfffff000;
6271                         val |= 0x880;
6272                         tw32(MAC_SERDES_CFG, val);
6273                 }
6274                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6275                         tw32(MAC_SERDES_CFG, 0x616000);
6276         }
6277
6278         /* Prevent chip from dropping frames when flow control
6279          * is enabled.
6280          */
6281         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6282
6283         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6284             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6285                 /* Use hardware link auto-negotiation */
6286                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6287         }
6288
6289         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6290             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6291                 u32 tmp;
6292
6293                 tmp = tr32(SERDES_RX_CTRL);
6294                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6295                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6296                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6297                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6298         }
6299
6300         err = tg3_setup_phy(tp, 1);
6301         if (err)
6302                 return err;
6303
6304         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6305                 u32 tmp;
6306
6307                 /* Clear CRC stats. */
6308                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6309                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6310                         tg3_readphy(tp, 0x14, &tmp);
6311                 }
6312         }
6313
6314         __tg3_set_rx_mode(tp->dev);
6315
6316         /* Initialize receive rules. */
6317         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6318         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6319         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6320         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6321
6322         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6323             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6324                 limit = 8;
6325         else
6326                 limit = 16;
6327         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6328                 limit -= 4;
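        /* Clear the unused receive rules.  The switch below falls through
         * deliberately: entering at "case limit" clears rule/value pairs
         * limit-1 down through 4.  Rules 3 and 2 are intentionally left
         * commented out, and rules 1 and 0 keep the values programmed
         * above.  When ASF is enabled the limit is lowered by four so the
         * last four rules are left untouched, presumably for use by the
         * management firmware.
         */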
6329         switch (limit) {
6330         case 16:
6331                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6332         case 15:
6333                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6334         case 14:
6335                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6336         case 13:
6337                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6338         case 12:
6339                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6340         case 11:
6341                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6342         case 10:
6343                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6344         case 9:
6345                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6346         case 8:
6347                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6348         case 7:
6349                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6350         case 6:
6351                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6352         case 5:
6353                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6354         case 4:
6355                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6356         case 3:
6357                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6358         case 2:
6359         case 1:
6360
6361         default:
6362                 break;
6363         }
6364
6365         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6366
6367         return 0;
6368 }
6369
6370 /* Called at device open time to get the chip ready for
6371  * packet processing.  Invoked with tp->lock held.
6372  */
6373 static int tg3_init_hw(struct tg3 *tp)
6374 {
6375         int err;
6376
6377         /* Force the chip into D0. */
6378         err = tg3_set_power_state(tp, PCI_D0);
6379         if (err)
6380                 goto out;
6381
6382         tg3_switch_clocks(tp);
6383
6384         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6385
6386         err = tg3_reset_hw(tp);
6387
6388 out:
6389         return err;
6390 }
6391
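/* Fold a 32-bit MAC statistics register into a 64-bit software counter.
 * The hardware counters are only 32 bits wide; a wrap is detected when
 * the updated low word ends up smaller than the value just added, in
 * which case the high word is incremented.
 */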
6392 #define TG3_STAT_ADD32(PSTAT, REG) \
6393 do {    u32 __val = tr32(REG); \
6394         (PSTAT)->low += __val; \
6395         if ((PSTAT)->low < __val) \
6396                 (PSTAT)->high += 1; \
6397 } while (0)
6398
6399 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6400 {
6401         struct tg3_hw_stats *sp = tp->hw_stats;
6402
6403         if (!netif_carrier_ok(tp->dev))
6404                 return;
6405
6406         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6407         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6408         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6409         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6410         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6411         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6412         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6413         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6414         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6415         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6416         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6417         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6418         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6419
6420         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6421         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6422         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6423         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6424         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6425         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6426         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6427         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6428         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6429         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6430         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6431         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6432         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6433         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6434 }
6435
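/* Driver heartbeat timer, re-armed every tp->timer_offset jiffies.
 * For chips without tagged status it papers over the racy
 * mailbox/status-block protocol by re-asserting the interrupt or forcing
 * a host-coalescing "now" cycle, and schedules the reset task if the
 * write DMA engine has unexpectedly stopped.  Once per second it fetches
 * the MAC statistics (5705 and later) and checks for link changes; when
 * ASF is enabled it also sends the firmware heartbeat every other second.
 */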
6436 static void tg3_timer(unsigned long __opaque)
6437 {
6438         struct tg3 *tp = (struct tg3 *) __opaque;
6439
6440         spin_lock(&tp->lock);
6441
6442         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6443                 /* All of this garbage exists because, when using non-tagged
6444                  * IRQ status, the mailbox/status_block protocol the chip
6445                  * uses with the CPU is race prone.
6446                  */
6447                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6448                         tw32(GRC_LOCAL_CTRL,
6449                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6450                 } else {
6451                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6452                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6453                 }
6454
6455                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6456                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6457                         spin_unlock(&tp->lock);
6458                         schedule_work(&tp->reset_task);
6459                         return;
6460                 }
6461         }
6462
6463         /* This part only runs once per second. */
6464         if (!--tp->timer_counter) {
6465                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6466                         tg3_periodic_fetch_stats(tp);
6467
6468                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6469                         u32 mac_stat;
6470                         int phy_event;
6471
6472                         mac_stat = tr32(MAC_STATUS);
6473
6474                         phy_event = 0;
6475                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6476                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6477                                         phy_event = 1;
6478                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6479                                 phy_event = 1;
6480
6481                         if (phy_event)
6482                                 tg3_setup_phy(tp, 0);
6483                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6484                         u32 mac_stat = tr32(MAC_STATUS);
6485                         int need_setup = 0;
6486
6487                         if (netif_carrier_ok(tp->dev) &&
6488                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6489                                 need_setup = 1;
6490                         }
6491                         if (! netif_carrier_ok(tp->dev) &&
6492                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6493                                          MAC_STATUS_SIGNAL_DET))) {
6494                                 need_setup = 1;
6495                         }
6496                         if (need_setup) {
6497                                 tw32_f(MAC_MODE,
6498                                      (tp->mac_mode &
6499                                       ~MAC_MODE_PORT_MODE_MASK));
6500                                 udelay(40);
6501                                 tw32_f(MAC_MODE, tp->mac_mode);
6502                                 udelay(40);
6503                                 tg3_setup_phy(tp, 0);
6504                         }
6505                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6506                         tg3_serdes_parallel_detect(tp);
6507
6508                 tp->timer_counter = tp->timer_multiplier;
6509         }
6510
6511         /* Heartbeat is only sent once every 2 seconds.  */
6512         if (!--tp->asf_counter) {
6513                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6514                         u32 val;
6515
6516                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
6517                                            FWCMD_NICDRV_ALIVE2);
6518                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6519                         /* 5 seconds timeout */
6520                         tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6521                         val = tr32(GRC_RX_CPU_EVENT);
6522                         val |= (1 << 14);
6523                         tw32(GRC_RX_CPU_EVENT, val);
6524                 }
6525                 tp->asf_counter = tp->asf_multiplier;
6526         }
6527
6528         spin_unlock(&tp->lock);
6529
6530         tp->timer.expires = jiffies + tp->timer_offset;
6531         add_timer(&tp->timer);
6532 }
6533
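/* Install the interrupt handler that matches the current configuration:
 * tg3_msi or tg3_msi_1shot when MSI is in use (MSI vectors are not
 * shared, so only SA_SAMPLE_RANDOM is passed), otherwise tg3_interrupt
 * or tg3_interrupt_tagged on a shared INTx line (SA_SHIRQ).
 */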
6534 static int tg3_request_irq(struct tg3 *tp)
6535 {
6536         irqreturn_t (*fn)(int, void *, struct pt_regs *);
6537         unsigned long flags;
6538         struct net_device *dev = tp->dev;
6539
6540         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6541                 fn = tg3_msi;
6542                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6543                         fn = tg3_msi_1shot;
6544                 flags = SA_SAMPLE_RANDOM;
6545         } else {
6546                 fn = tg3_interrupt;
6547                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6548                         fn = tg3_interrupt_tagged;
6549                 flags = SA_SHIRQ | SA_SAMPLE_RANDOM;
6550         }
6551         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6552 }
6553
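/* Verify that the device can actually deliver an interrupt: temporarily
 * swap in tg3_test_isr, force an immediate host-coalescing interrupt
 * with HOSTCC_MODE_NOW, and poll the interrupt mailbox up to five times
 * (10 ms apart).  The normal handler is restored via tg3_request_irq()
 * before returning; -EIO means no interrupt was seen.
 */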
6554 static int tg3_test_interrupt(struct tg3 *tp)
6555 {
6556         struct net_device *dev = tp->dev;
6557         int err, i;
6558         u32 int_mbox = 0;
6559
6560         if (!netif_running(dev))
6561                 return -ENODEV;
6562
6563         tg3_disable_ints(tp);
6564
6565         free_irq(tp->pdev->irq, dev);
6566
6567         err = request_irq(tp->pdev->irq, tg3_test_isr,
6568                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6569         if (err)
6570                 return err;
6571
6572         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6573         tg3_enable_ints(tp);
6574
6575         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6576                HOSTCC_MODE_NOW);
6577
6578         for (i = 0; i < 5; i++) {
6579                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6580                                         TG3_64BIT_REG_LOW);
6581                 if (int_mbox != 0)
6582                         break;
6583                 msleep(10);
6584         }
6585
6586         tg3_disable_ints(tp);
6587
6588         free_irq(tp->pdev->irq, dev);
6589         
6590         err = tg3_request_irq(tp);
6591
6592         if (err)
6593                 return err;
6594
6595         if (int_mbox != 0)
6596                 return 0;
6597
6598         return -EIO;
6599 }
6600
6601 /* Returns 0 if the MSI test succeeds, or if the MSI test fails
6602  * but INTx mode is successfully restored.
6603  */
6604 static int tg3_test_msi(struct tg3 *tp)
6605 {
6606         struct net_device *dev = tp->dev;
6607         int err;
6608         u16 pci_cmd;
6609
6610         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6611                 return 0;
6612
6613         /* Turn off SERR reporting in case MSI terminates with Master
6614          * Abort.
6615          */
6616         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6617         pci_write_config_word(tp->pdev, PCI_COMMAND,
6618                               pci_cmd & ~PCI_COMMAND_SERR);
6619
6620         err = tg3_test_interrupt(tp);
6621
6622         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6623
6624         if (!err)
6625                 return 0;
6626
6627         /* other failures */
6628         if (err != -EIO)
6629                 return err;
6630
6631         /* MSI test failed, go back to INTx mode */
6632         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6633                "switching to INTx mode. Please report this failure to "
6634                "the PCI maintainer and include system chipset information.\n",
6635                        tp->dev->name);
6636
6637         free_irq(tp->pdev->irq, dev);
6638         pci_disable_msi(tp->pdev);
6639
6640         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6641
6642         err = tg3_request_irq(tp);
6643         if (err)
6644                 return err;
6645
6646         /* Need to reset the chip because the MSI cycle may have terminated
6647          * with Master Abort.
6648          */
6649         tg3_full_lock(tp, 1);
6650
6651         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6652         err = tg3_init_hw(tp);
6653
6654         tg3_full_unlock(tp);
6655
6656         if (err)
6657                 free_irq(tp->pdev->irq, dev);
6658
6659         return err;
6660 }
6661
6662 static int tg3_open(struct net_device *dev)
6663 {
6664         struct tg3 *tp = netdev_priv(dev);
6665         int err;
6666
6667         tg3_full_lock(tp, 0);
6668
6669         err = tg3_set_power_state(tp, PCI_D0);
6670         if (err)
6671                 return err;
6672
6673         tg3_disable_ints(tp);
6674         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6675
6676         tg3_full_unlock(tp);
6677
6678         /* The placement of this call is tied
6679          * to the setup and use of Host TX descriptors.
6680          */
6681         err = tg3_alloc_consistent(tp);
6682         if (err)
6683                 return err;
6684
6685         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6686             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6687             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6688             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6689               (tp->pdev_peer == tp->pdev))) {
6690                 /* All MSI supporting chips should support tagged
6691                  * status.  Assert that this is the case.
6692                  */
6693                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6694                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6695                                "Not using MSI.\n", tp->dev->name);
6696                 } else if (pci_enable_msi(tp->pdev) == 0) {
6697                         u32 msi_mode;
6698
6699                         msi_mode = tr32(MSGINT_MODE);
6700                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6701                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6702                 }
6703         }
6704         err = tg3_request_irq(tp);
6705
6706         if (err) {
6707                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6708                         pci_disable_msi(tp->pdev);
6709                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6710                 }
6711                 tg3_free_consistent(tp);
6712                 return err;
6713         }
6714
6715         tg3_full_lock(tp, 0);
6716
6717         err = tg3_init_hw(tp);
6718         if (err) {
6719                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6720                 tg3_free_rings(tp);
6721         } else {
6722                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6723                         tp->timer_offset = HZ;
6724                 else
6725                         tp->timer_offset = HZ / 10;
6726
6727                 BUG_ON(tp->timer_offset > HZ);
6728                 tp->timer_counter = tp->timer_multiplier =
6729                         (HZ / tp->timer_offset);
6730                 tp->asf_counter = tp->asf_multiplier =
6731                         ((HZ / tp->timer_offset) * 2);
6732
6733                 init_timer(&tp->timer);
6734                 tp->timer.expires = jiffies + tp->timer_offset;
6735                 tp->timer.data = (unsigned long) tp;
6736                 tp->timer.function = tg3_timer;
6737         }
6738
6739         tg3_full_unlock(tp);
6740
6741         if (err) {
6742                 free_irq(tp->pdev->irq, dev);
6743                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6744                         pci_disable_msi(tp->pdev);
6745                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6746                 }
6747                 tg3_free_consistent(tp);
6748                 return err;
6749         }
6750
6751         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6752                 err = tg3_test_msi(tp);
6753
6754                 if (err) {
6755                         tg3_full_lock(tp, 0);
6756
6757                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6758                                 pci_disable_msi(tp->pdev);
6759                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6760                         }
6761                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6762                         tg3_free_rings(tp);
6763                         tg3_free_consistent(tp);
6764
6765                         tg3_full_unlock(tp);
6766
6767                         return err;
6768                 }
6769
6770                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6771                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6772                                 u32 val = tr32(0x7c04);
6773
6774                                 tw32(0x7c04, val | (1 << 29));
6775                         }
6776                 }
6777         }
6778
6779         tg3_full_lock(tp, 0);
6780
6781         add_timer(&tp->timer);
6782         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6783         tg3_enable_ints(tp);
6784
6785         tg3_full_unlock(tp);
6786
6787         netif_start_queue(dev);
6788
6789         return 0;
6790 }
6791
6792 #if 0
6793 /*static*/ void tg3_dump_state(struct tg3 *tp)
6794 {
6795         u32 val32, val32_2, val32_3, val32_4, val32_5;
6796         u16 val16;
6797         int i;
6798
6799         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6800         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6801         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6802                val16, val32);
6803
6804         /* MAC block */
6805         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6806                tr32(MAC_MODE), tr32(MAC_STATUS));
6807         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6808                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6809         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6810                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6811         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6812                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6813
6814         /* Send data initiator control block */
6815         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6816                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6817         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6818                tr32(SNDDATAI_STATSCTRL));
6819
6820         /* Send data completion control block */
6821         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6822
6823         /* Send BD ring selector block */
6824         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6825                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6826
6827         /* Send BD initiator control block */
6828         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6829                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6830
6831         /* Send BD completion control block */
6832         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6833
6834         /* Receive list placement control block */
6835         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6836                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6837         printk("       RCVLPC_STATSCTRL[%08x]\n",
6838                tr32(RCVLPC_STATSCTRL));
6839
6840         /* Receive data and receive BD initiator control block */
6841         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6842                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6843
6844         /* Receive data completion control block */
6845         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6846                tr32(RCVDCC_MODE));
6847
6848         /* Receive BD initiator control block */
6849         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6850                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6851
6852         /* Receive BD completion control block */
6853         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6854                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6855
6856         /* Receive list selector control block */
6857         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6858                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6859
6860         /* Mbuf cluster free block */
6861         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6862                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6863
6864         /* Host coalescing control block */
6865         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6866                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6867         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6868                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6869                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6870         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6871                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6872                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6873         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6874                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6875         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6876                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6877
6878         /* Memory arbiter control block */
6879         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6880                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6881
6882         /* Buffer manager control block */
6883         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6884                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6885         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6886                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6887         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6888                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6889                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6890                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6891
6892         /* Read DMA control block */
6893         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6894                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6895
6896         /* Write DMA control block */
6897         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6898                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6899
6900         /* DMA completion block */
6901         printk("DEBUG: DMAC_MODE[%08x]\n",
6902                tr32(DMAC_MODE));
6903
6904         /* GRC block */
6905         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6906                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6907         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6908                tr32(GRC_LOCAL_CTRL));
6909
6910         /* TG3_BDINFOs */
6911         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6912                tr32(RCVDBDI_JUMBO_BD + 0x0),
6913                tr32(RCVDBDI_JUMBO_BD + 0x4),
6914                tr32(RCVDBDI_JUMBO_BD + 0x8),
6915                tr32(RCVDBDI_JUMBO_BD + 0xc));
6916         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6917                tr32(RCVDBDI_STD_BD + 0x0),
6918                tr32(RCVDBDI_STD_BD + 0x4),
6919                tr32(RCVDBDI_STD_BD + 0x8),
6920                tr32(RCVDBDI_STD_BD + 0xc));
6921         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6922                tr32(RCVDBDI_MINI_BD + 0x0),
6923                tr32(RCVDBDI_MINI_BD + 0x4),
6924                tr32(RCVDBDI_MINI_BD + 0x8),
6925                tr32(RCVDBDI_MINI_BD + 0xc));
6926
6927         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6928         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6929         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6930         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6931         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6932                val32, val32_2, val32_3, val32_4);
6933
6934         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6935         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6936         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6937         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6938         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6939                val32, val32_2, val32_3, val32_4);
6940
6941         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6942         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6943         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6944         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6945         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6946         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6947                val32, val32_2, val32_3, val32_4, val32_5);
6948
6949         /* SW status block */
6950         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6951                tp->hw_status->status,
6952                tp->hw_status->status_tag,
6953                tp->hw_status->rx_jumbo_consumer,
6954                tp->hw_status->rx_consumer,
6955                tp->hw_status->rx_mini_consumer,
6956                tp->hw_status->idx[0].rx_producer,
6957                tp->hw_status->idx[0].tx_consumer);
6958
6959         /* SW statistics block */
6960         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6961                ((u32 *)tp->hw_stats)[0],
6962                ((u32 *)tp->hw_stats)[1],
6963                ((u32 *)tp->hw_stats)[2],
6964                ((u32 *)tp->hw_stats)[3]);
6965
6966         /* Mailboxes */
6967         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6968                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
6969                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
6970                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
6971                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
6972
6973         /* NIC side send descriptors. */
6974         for (i = 0; i < 6; i++) {
6975                 unsigned long txd;
6976
6977                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
6978                         + (i * sizeof(struct tg3_tx_buffer_desc));
6979                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6980                        i,
6981                        readl(txd + 0x0), readl(txd + 0x4),
6982                        readl(txd + 0x8), readl(txd + 0xc));
6983         }
6984
6985         /* NIC side RX descriptors. */
6986         for (i = 0; i < 6; i++) {
6987                 unsigned long rxd;
6988
6989                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
6990                         + (i * sizeof(struct tg3_rx_buffer_desc));
6991                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6992                        i,
6993                        readl(rxd + 0x0), readl(rxd + 0x4),
6994                        readl(rxd + 0x8), readl(rxd + 0xc));
6995                 rxd += (4 * sizeof(u32));
6996                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6997                        i,
6998                        readl(rxd + 0x0), readl(rxd + 0x4),
6999                        readl(rxd + 0x8), readl(rxd + 0xc));
7000         }
7001
7002         for (i = 0; i < 6; i++) {
7003                 unsigned long rxd;
7004
7005                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7006                         + (i * sizeof(struct tg3_rx_buffer_desc));
7007                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7008                        i,
7009                        readl(rxd + 0x0), readl(rxd + 0x4),
7010                        readl(rxd + 0x8), readl(rxd + 0xc));
7011                 rxd += (4 * sizeof(u32));
7012                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7013                        i,
7014                        readl(rxd + 0x0), readl(rxd + 0x4),
7015                        readl(rxd + 0x8), readl(rxd + 0xc));
7016         }
7017 }
7018 #endif
7019
7020 static struct net_device_stats *tg3_get_stats(struct net_device *);
7021 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7022
7023 static int tg3_close(struct net_device *dev)
7024 {
7025         struct tg3 *tp = netdev_priv(dev);
7026
7027         /* Calling flush_scheduled_work() may deadlock because
7028          * linkwatch_event() may be on the workqueue and it will try to get
7029          * the rtnl_lock which we are holding.
7030          */
7031         while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7032                 msleep(1);
7033
7034         netif_stop_queue(dev);
7035
7036         del_timer_sync(&tp->timer);
7037
7038         tg3_full_lock(tp, 1);
7039 #if 0
7040         tg3_dump_state(tp);
7041 #endif
7042
7043         tg3_disable_ints(tp);
7044
7045         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7046         tg3_free_rings(tp);
7047         tp->tg3_flags &=
7048                 ~(TG3_FLAG_INIT_COMPLETE |
7049                   TG3_FLAG_GOT_SERDES_FLOWCTL);
7050
7051         tg3_full_unlock(tp);
7052
7053         free_irq(tp->pdev->irq, dev);
7054         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7055                 pci_disable_msi(tp->pdev);
7056                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7057         }
7058
7059         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7060                sizeof(tp->net_stats_prev));
7061         memcpy(&tp->estats_prev, tg3_get_estats(tp),
7062                sizeof(tp->estats_prev));
7063
7064         tg3_free_consistent(tp);
7065
7066         tg3_set_power_state(tp, PCI_D3hot);
7067
7068         netif_carrier_off(tp->dev);
7069
7070         return 0;
7071 }
7072
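/* Collapse one of the driver's 64-bit (high/low) statistics counters
 * into an unsigned long.  On 32-bit kernels only the low 32 bits are
 * reported; on 64-bit kernels the full value is returned.
 */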
7073 static inline unsigned long get_stat64(tg3_stat64_t *val)
7074 {
7075         unsigned long ret;
7076
7077 #if (BITS_PER_LONG == 32)
7078         ret = val->low;
7079 #else
7080         ret = ((u64)val->high << 32) | ((u64)val->low);
7081 #endif
7082         return ret;
7083 }
7084
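/* Return the cumulative CRC (FCS) error count.  On 5700/5701 with a
 * copper PHY the count is read from the PHY (write 0x8000 to register
 * 0x1e, then read register 0x14) and accumulated in tp->phy_crc_errors;
 * all other configurations use the MAC's rx_fcs_errors counter.
 */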
7085 static unsigned long calc_crc_errors(struct tg3 *tp)
7086 {
7087         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7088
7089         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7090             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7091              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7092                 u32 val;
7093
7094                 spin_lock_bh(&tp->lock);
7095                 if (!tg3_readphy(tp, 0x1e, &val)) {
7096                         tg3_writephy(tp, 0x1e, val | 0x8000);
7097                         tg3_readphy(tp, 0x14, &val);
7098                 } else
7099                         val = 0;
7100                 spin_unlock_bh(&tp->lock);
7101
7102                 tp->phy_crc_errors += val;
7103
7104                 return tp->phy_crc_errors;
7105         }
7106
7107         return get_stat64(&hw_stats->rx_fcs_errors);
7108 }
7109
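/* Each ethtool statistic is reported as the snapshot saved when the
 * device was last closed (tp->estats_prev) plus whatever the hardware
 * has accumulated since, so the totals persist across ifdown/ifup.
 */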
7110 #define ESTAT_ADD(member) \
7111         estats->member =        old_estats->member + \
7112                                 get_stat64(&hw_stats->member)
7113
7114 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7115 {
7116         struct tg3_ethtool_stats *estats = &tp->estats;
7117         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7118         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7119
7120         if (!hw_stats)
7121                 return old_estats;
7122
7123         ESTAT_ADD(rx_octets);
7124         ESTAT_ADD(rx_fragments);
7125         ESTAT_ADD(rx_ucast_packets);
7126         ESTAT_ADD(rx_mcast_packets);
7127         ESTAT_ADD(rx_bcast_packets);
7128         ESTAT_ADD(rx_fcs_errors);
7129         ESTAT_ADD(rx_align_errors);
7130         ESTAT_ADD(rx_xon_pause_rcvd);
7131         ESTAT_ADD(rx_xoff_pause_rcvd);
7132         ESTAT_ADD(rx_mac_ctrl_rcvd);
7133         ESTAT_ADD(rx_xoff_entered);
7134         ESTAT_ADD(rx_frame_too_long_errors);
7135         ESTAT_ADD(rx_jabbers);
7136         ESTAT_ADD(rx_undersize_packets);
7137         ESTAT_ADD(rx_in_length_errors);
7138         ESTAT_ADD(rx_out_length_errors);
7139         ESTAT_ADD(rx_64_or_less_octet_packets);
7140         ESTAT_ADD(rx_65_to_127_octet_packets);
7141         ESTAT_ADD(rx_128_to_255_octet_packets);
7142         ESTAT_ADD(rx_256_to_511_octet_packets);
7143         ESTAT_ADD(rx_512_to_1023_octet_packets);
7144         ESTAT_ADD(rx_1024_to_1522_octet_packets);
7145         ESTAT_ADD(rx_1523_to_2047_octet_packets);
7146         ESTAT_ADD(rx_2048_to_4095_octet_packets);
7147         ESTAT_ADD(rx_4096_to_8191_octet_packets);
7148         ESTAT_ADD(rx_8192_to_9022_octet_packets);
7149
7150         ESTAT_ADD(tx_octets);
7151         ESTAT_ADD(tx_collisions);
7152         ESTAT_ADD(tx_xon_sent);
7153         ESTAT_ADD(tx_xoff_sent);
7154         ESTAT_ADD(tx_flow_control);
7155         ESTAT_ADD(tx_mac_errors);
7156         ESTAT_ADD(tx_single_collisions);
7157         ESTAT_ADD(tx_mult_collisions);
7158         ESTAT_ADD(tx_deferred);
7159         ESTAT_ADD(tx_excessive_collisions);
7160         ESTAT_ADD(tx_late_collisions);
7161         ESTAT_ADD(tx_collide_2times);
7162         ESTAT_ADD(tx_collide_3times);
7163         ESTAT_ADD(tx_collide_4times);
7164         ESTAT_ADD(tx_collide_5times);
7165         ESTAT_ADD(tx_collide_6times);
7166         ESTAT_ADD(tx_collide_7times);
7167         ESTAT_ADD(tx_collide_8times);
7168         ESTAT_ADD(tx_collide_9times);
7169         ESTAT_ADD(tx_collide_10times);
7170         ESTAT_ADD(tx_collide_11times);
7171         ESTAT_ADD(tx_collide_12times);
7172         ESTAT_ADD(tx_collide_13times);
7173         ESTAT_ADD(tx_collide_14times);
7174         ESTAT_ADD(tx_collide_15times);
7175         ESTAT_ADD(tx_ucast_packets);
7176         ESTAT_ADD(tx_mcast_packets);
7177         ESTAT_ADD(tx_bcast_packets);
7178         ESTAT_ADD(tx_carrier_sense_errors);
7179         ESTAT_ADD(tx_discards);
7180         ESTAT_ADD(tx_errors);
7181
7182         ESTAT_ADD(dma_writeq_full);
7183         ESTAT_ADD(dma_write_prioq_full);
7184         ESTAT_ADD(rxbds_empty);
7185         ESTAT_ADD(rx_discards);
7186         ESTAT_ADD(rx_errors);
7187         ESTAT_ADD(rx_threshold_hit);
7188
7189         ESTAT_ADD(dma_readq_full);
7190         ESTAT_ADD(dma_read_prioq_full);
7191         ESTAT_ADD(tx_comp_queue_full);
7192
7193         ESTAT_ADD(ring_set_send_prod_index);
7194         ESTAT_ADD(ring_status_update);
7195         ESTAT_ADD(nic_irqs);
7196         ESTAT_ADD(nic_avoided_irqs);
7197         ESTAT_ADD(nic_tx_threshold_hit);
7198
7199         return estats;
7200 }
7201
7202 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7203 {
7204         struct tg3 *tp = netdev_priv(dev);
7205         struct net_device_stats *stats = &tp->net_stats;
7206         struct net_device_stats *old_stats = &tp->net_stats_prev;
7207         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7208
7209         if (!hw_stats)
7210                 return old_stats;
7211
7212         stats->rx_packets = old_stats->rx_packets +
7213                 get_stat64(&hw_stats->rx_ucast_packets) +
7214                 get_stat64(&hw_stats->rx_mcast_packets) +
7215                 get_stat64(&hw_stats->rx_bcast_packets);
7216                 
7217         stats->tx_packets = old_stats->tx_packets +
7218                 get_stat64(&hw_stats->tx_ucast_packets) +
7219                 get_stat64(&hw_stats->tx_mcast_packets) +
7220                 get_stat64(&hw_stats->tx_bcast_packets);
7221
7222         stats->rx_bytes = old_stats->rx_bytes +
7223                 get_stat64(&hw_stats->rx_octets);
7224         stats->tx_bytes = old_stats->tx_bytes +
7225                 get_stat64(&hw_stats->tx_octets);
7226
7227         stats->rx_errors = old_stats->rx_errors +
7228                 get_stat64(&hw_stats->rx_errors);
7229         stats->tx_errors = old_stats->tx_errors +
7230                 get_stat64(&hw_stats->tx_errors) +
7231                 get_stat64(&hw_stats->tx_mac_errors) +
7232                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7233                 get_stat64(&hw_stats->tx_discards);
7234
7235         stats->multicast = old_stats->multicast +
7236                 get_stat64(&hw_stats->rx_mcast_packets);
7237         stats->collisions = old_stats->collisions +
7238                 get_stat64(&hw_stats->tx_collisions);
7239
7240         stats->rx_length_errors = old_stats->rx_length_errors +
7241                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7242                 get_stat64(&hw_stats->rx_undersize_packets);
7243
7244         stats->rx_over_errors = old_stats->rx_over_errors +
7245                 get_stat64(&hw_stats->rxbds_empty);
7246         stats->rx_frame_errors = old_stats->rx_frame_errors +
7247                 get_stat64(&hw_stats->rx_align_errors);
7248         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7249                 get_stat64(&hw_stats->tx_discards);
7250         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7251                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7252
7253         stats->rx_crc_errors = old_stats->rx_crc_errors +
7254                 calc_crc_errors(tp);
7255
7256         stats->rx_missed_errors = old_stats->rx_missed_errors +
7257                 get_stat64(&hw_stats->rx_discards);
7258
7259         return stats;
7260 }
7261
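/* Bit-by-bit CRC-32 over the buffer using the reflected Ethernet
 * polynomial 0xedb88320, returning the complemented remainder.  Used
 * below to hash multicast addresses into the MAC hash filter registers.
 */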
7262 static inline u32 calc_crc(unsigned char *buf, int len)
7263 {
7264         u32 reg;
7265         u32 tmp;
7266         int j, k;
7267
7268         reg = 0xffffffff;
7269
7270         for (j = 0; j < len; j++) {
7271                 reg ^= buf[j];
7272
7273                 for (k = 0; k < 8; k++) {
7274                         tmp = reg & 0x01;
7275
7276                         reg >>= 1;
7277
7278                         if (tmp) {
7279                                 reg ^= 0xedb88320;
7280                         }
7281                 }
7282         }
7283
7284         return ~reg;
7285 }
7286
7287 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7288 {
7289         /* accept or reject all multicast frames */
7290         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7291         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7292         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7293         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7294 }
7295
7296 static void __tg3_set_rx_mode(struct net_device *dev)
7297 {
7298         struct tg3 *tp = netdev_priv(dev);
7299         u32 rx_mode;
7300
7301         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7302                                   RX_MODE_KEEP_VLAN_TAG);
7303
7304         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7305          * flag clear.
7306          */
7307 #if TG3_VLAN_TAG_USED
7308         if (!tp->vlgrp &&
7309             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7310                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7311 #else
7312         /* By definition, VLAN is always disabled in this
7313          * case.
7314          */
7315         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7316                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7317 #endif
7318
7319         if (dev->flags & IFF_PROMISC) {
7320                 /* Promiscuous mode. */
7321                 rx_mode |= RX_MODE_PROMISC;
7322         } else if (dev->flags & IFF_ALLMULTI) {
7323                 /* Accept all multicast. */
7324                 tg3_set_multi (tp, 1);
7325         } else if (dev->mc_count < 1) {
7326                 /* Reject all multicast. */
7327                 tg3_set_multi (tp, 0);
7328         } else {
7329                 /* Accept one or more multicast(s). */
7330                 struct dev_mc_list *mclist;
7331                 unsigned int i;
7332                 u32 mc_filter[4] = { 0, };
7333                 u32 regidx;
7334                 u32 bit;
7335                 u32 crc;
7336
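                /* Hash each address into the 128-bit filter: the low
                 * seven bits of the inverted CRC pick the filter bit,
                 * with bits 6:5 selecting one of the four 32-bit
                 * MAC_HASH registers and bits 4:0 the bit within it.
                 */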
7337                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7338                      i++, mclist = mclist->next) {
7339
7340                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7341                         bit = ~crc & 0x7f;
7342                         regidx = (bit & 0x60) >> 5;
7343                         bit &= 0x1f;
7344                         mc_filter[regidx] |= (1 << bit);
7345                 }
7346
7347                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7348                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7349                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7350                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7351         }
7352
7353         if (rx_mode != tp->rx_mode) {
7354                 tp->rx_mode = rx_mode;
7355                 tw32_f(MAC_RX_MODE, rx_mode);
7356                 udelay(10);
7357         }
7358 }
7359
7360 static void tg3_set_rx_mode(struct net_device *dev)
7361 {
7362         struct tg3 *tp = netdev_priv(dev);
7363
7364         if (!netif_running(dev))
7365                 return;
7366
7367         tg3_full_lock(tp, 0);
7368         __tg3_set_rx_mode(dev);
7369         tg3_full_unlock(tp);
7370 }
7371
7372 #define TG3_REGDUMP_LEN         (32 * 1024)
7373
7374 static int tg3_get_regs_len(struct net_device *dev)
7375 {
7376         return TG3_REGDUMP_LEN;
7377 }
7378
7379 static void tg3_get_regs(struct net_device *dev,
7380                 struct ethtool_regs *regs, void *_p)
7381 {
7382         u32 *p = _p;
7383         struct tg3 *tp = netdev_priv(dev);
7384         u8 *orig_p = _p;
7385         int i;
7386
7387         regs->version = 0;
7388
7389         memset(p, 0, TG3_REGDUMP_LEN);
7390
7391         if (tp->link_config.phy_is_low_power)
7392                 return;
7393
7394         tg3_full_lock(tp, 0);
7395
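/* Each register (or block of registers) is copied into the dump buffer
 * at the same offset it occupies in the device's register space, so the
 * 32 KB blob is a sparse image of the register file; regions that are
 * not read remain zero from the memset above.
 */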
7396 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7397 #define GET_REG32_LOOP(base,len)                \
7398 do {    p = (u32 *)(orig_p + (base));           \
7399         for (i = 0; i < len; i += 4)            \
7400                 __GET_REG32((base) + i);        \
7401 } while (0)
7402 #define GET_REG32_1(reg)                        \
7403 do {    p = (u32 *)(orig_p + (reg));            \
7404         __GET_REG32((reg));                     \
7405 } while (0)
7406
7407         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7408         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7409         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7410         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7411         GET_REG32_1(SNDDATAC_MODE);
7412         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7413         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7414         GET_REG32_1(SNDBDC_MODE);
7415         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7416         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7417         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7418         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7419         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7420         GET_REG32_1(RCVDCC_MODE);
7421         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7422         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7423         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7424         GET_REG32_1(MBFREE_MODE);
7425         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7426         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7427         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7428         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7429         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7430         GET_REG32_1(RX_CPU_MODE);
7431         GET_REG32_1(RX_CPU_STATE);
7432         GET_REG32_1(RX_CPU_PGMCTR);
7433         GET_REG32_1(RX_CPU_HWBKPT);
7434         GET_REG32_1(TX_CPU_MODE);
7435         GET_REG32_1(TX_CPU_STATE);
7436         GET_REG32_1(TX_CPU_PGMCTR);
7437         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7438         GET_REG32_LOOP(FTQ_RESET, 0x120);
7439         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7440         GET_REG32_1(DMAC_MODE);
7441         GET_REG32_LOOP(GRC_MODE, 0x4c);
7442         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7443                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7444
7445 #undef __GET_REG32
7446 #undef GET_REG32_LOOP
7447 #undef GET_REG32_1
7448
7449         tg3_full_unlock(tp);
7450 }
7451
7452 static int tg3_get_eeprom_len(struct net_device *dev)
7453 {
7454         struct tg3 *tp = netdev_priv(dev);
7455
7456         return tp->nvram_size;
7457 }
7458
7459 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7460 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7461
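/* NVRAM can only be read as aligned 32-bit words, so the request is
 * split into an unaligned head, whole words, and an unaligned tail.
 * Each word is converted with cpu_to_le32() before the requested bytes
 * are copied out to the caller's buffer.
 */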
7462 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7463 {
7464         struct tg3 *tp = netdev_priv(dev);
7465         int ret;
7466         u8  *pd;
7467         u32 i, offset, len, val, b_offset, b_count;
7468
7469         if (tp->link_config.phy_is_low_power)
7470                 return -EAGAIN;
7471
7472         offset = eeprom->offset;
7473         len = eeprom->len;
7474         eeprom->len = 0;
7475
7476         eeprom->magic = TG3_EEPROM_MAGIC;
7477
7478         if (offset & 3) {
7479                 /* adjustments to start on required 4 byte boundary */
7480                 b_offset = offset & 3;
7481                 b_count = 4 - b_offset;
7482                 if (b_count > len) {
7483                         /* i.e. offset=1 len=2 */
7484                         b_count = len;
7485                 }
7486                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7487                 if (ret)
7488                         return ret;
7489                 val = cpu_to_le32(val);
7490                 memcpy(data, ((char*)&val) + b_offset, b_count);
7491                 len -= b_count;
7492                 offset += b_count;
7493                 eeprom->len += b_count;
7494         }
7495
7496         /* read bytes up to the last 4 byte boundary */
7497         pd = &data[eeprom->len];
7498         for (i = 0; i < (len - (len & 3)); i += 4) {
7499                 ret = tg3_nvram_read(tp, offset + i, &val);
7500                 if (ret) {
7501                         eeprom->len += i;
7502                         return ret;
7503                 }
7504                 val = cpu_to_le32(val);
7505                 memcpy(pd + i, &val, 4);
7506         }
7507         eeprom->len += i;
7508
7509         if (len & 3) {
7510                 /* read last bytes not ending on 4 byte boundary */
7511                 pd = &data[eeprom->len];
7512                 b_count = len & 3;
7513                 b_offset = offset + len - b_count;
7514                 ret = tg3_nvram_read(tp, b_offset, &val);
7515                 if (ret)
7516                         return ret;
7517                 val = cpu_to_le32(val);
7518                 memcpy(pd, ((char*)&val), b_count);
7519                 eeprom->len += b_count;
7520         }
7521         return 0;
7522 }
7523
7524 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7525
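/* NVRAM writes have the same aligned 32-bit granularity.  Unaligned
 * head or tail bytes are handled by reading the neighbouring words and
 * merging the caller's data into a temporary buffer (read-modify-write)
 * before the whole aligned range is passed to tg3_nvram_write_block().
 */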
7526 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7527 {
7528         struct tg3 *tp = netdev_priv(dev);
7529         int ret;
7530         u32 offset, len, b_offset, odd_len, start, end;
7531         u8 *buf;
7532
7533         if (tp->link_config.phy_is_low_power)
7534                 return -EAGAIN;
7535
7536         if (eeprom->magic != TG3_EEPROM_MAGIC)
7537                 return -EINVAL;
7538
7539         offset = eeprom->offset;
7540         len = eeprom->len;
7541
7542         if ((b_offset = (offset & 3))) {
7543                 /* adjustments to start on required 4 byte boundary */
7544                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7545                 if (ret)
7546                         return ret;
7547                 start = cpu_to_le32(start);
7548                 len += b_offset;
7549                 offset &= ~3;
7550                 if (len < 4)
7551                         len = 4;
7552         }
7553
7554         odd_len = 0;
7555         if (len & 3) {
7556                 /* adjustments to end on required 4 byte boundary */
7557                 odd_len = 1;
7558                 len = (len + 3) & ~3;
7559                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7560                 if (ret)
7561                         return ret;
7562                 end = cpu_to_le32(end);
7563         }
7564
7565         buf = data;
7566         if (b_offset || odd_len) {
7567                 buf = kmalloc(len, GFP_KERNEL);
7568                 if (!buf)
7569                         return -ENOMEM;
7570                 if (b_offset)
7571                         memcpy(buf, &start, 4);
7572                 if (odd_len)
7573                         memcpy(buf+len-4, &end, 4);
7574                 memcpy(buf + b_offset, data, eeprom->len);
7575         }
7576
7577         ret = tg3_nvram_write_block(tp, offset, len, buf);
7578
7579         if (buf != data)
7580                 kfree(buf);
7581
7582         return ret;
7583 }
7584
7585 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7586 {
7587         struct tg3 *tp = netdev_priv(dev);
7588   
7589         cmd->supported = (SUPPORTED_Autoneg);
7590
7591         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7592                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7593                                    SUPPORTED_1000baseT_Full);
7594
7595         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
7596                 cmd->supported |= (SUPPORTED_100baseT_Half |
7597                                   SUPPORTED_100baseT_Full |
7598                                   SUPPORTED_10baseT_Half |
7599                                   SUPPORTED_10baseT_Full |
7600                                   SUPPORTED_MII);
7601         else
7602                 cmd->supported |= SUPPORTED_FIBRE;
7603   
7604         cmd->advertising = tp->link_config.advertising;
7605         if (netif_running(dev)) {
7606                 cmd->speed = tp->link_config.active_speed;
7607                 cmd->duplex = tp->link_config.active_duplex;
7608         }
7609         cmd->port = 0;
7610         cmd->phy_address = PHY_ADDR;
7611         cmd->transceiver = 0;
7612         cmd->autoneg = tp->link_config.autoneg;
7613         cmd->maxtxpkt = 0;
7614         cmd->maxrxpkt = 0;
7615         return 0;
7616 }
7617   
7618 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7619 {
7620         struct tg3 *tp = netdev_priv(dev);
7621   
7622         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7623                 /* These are the only valid advertisement bits allowed.  */
7624                 if (cmd->autoneg == AUTONEG_ENABLE &&
7625                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7626                                           ADVERTISED_1000baseT_Full |
7627                                           ADVERTISED_Autoneg |
7628                                           ADVERTISED_FIBRE)))
7629                         return -EINVAL;
7630                 /* Fiber can only do SPEED_1000.  */
7631                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7632                          (cmd->speed != SPEED_1000))
7633                         return -EINVAL;
7634         /* Copper cannot force SPEED_1000.  */
7635         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7636                    (cmd->speed == SPEED_1000))
7637                 return -EINVAL;
7638         else if ((cmd->speed == SPEED_1000) &&
7639                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7640                 return -EINVAL;
7641
7642         tg3_full_lock(tp, 0);
7643
7644         tp->link_config.autoneg = cmd->autoneg;
7645         if (cmd->autoneg == AUTONEG_ENABLE) {
7646                 tp->link_config.advertising = cmd->advertising;
7647                 tp->link_config.speed = SPEED_INVALID;
7648                 tp->link_config.duplex = DUPLEX_INVALID;
7649         } else {
7650                 tp->link_config.advertising = 0;
7651                 tp->link_config.speed = cmd->speed;
7652                 tp->link_config.duplex = cmd->duplex;
7653         }
7654   
7655         if (netif_running(dev))
7656                 tg3_setup_phy(tp, 1);
7657
7658         tg3_full_unlock(tp);
7659   
7660         return 0;
7661 }
7662   
7663 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7664 {
7665         struct tg3 *tp = netdev_priv(dev);
7666   
7667         strcpy(info->driver, DRV_MODULE_NAME);
7668         strcpy(info->version, DRV_MODULE_VERSION);
7669         strcpy(info->fw_version, tp->fw_ver);
7670         strcpy(info->bus_info, pci_name(tp->pdev));
7671 }
7672   
7673 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7674 {
7675         struct tg3 *tp = netdev_priv(dev);
7676   
7677         wol->supported = WAKE_MAGIC;
7678         wol->wolopts = 0;
7679         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7680                 wol->wolopts = WAKE_MAGIC;
7681         memset(&wol->sopass, 0, sizeof(wol->sopass));
7682 }
7683   
7684 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7685 {
7686         struct tg3 *tp = netdev_priv(dev);
7687   
7688         if (wol->wolopts & ~WAKE_MAGIC)
7689                 return -EINVAL;
7690         if ((wol->wolopts & WAKE_MAGIC) &&
7691             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7692             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7693                 return -EINVAL;
7694   
7695         spin_lock_bh(&tp->lock);
7696         if (wol->wolopts & WAKE_MAGIC)
7697                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7698         else
7699                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7700         spin_unlock_bh(&tp->lock);
7701   
7702         return 0;
7703 }
7704   
7705 static u32 tg3_get_msglevel(struct net_device *dev)
7706 {
7707         struct tg3 *tp = netdev_priv(dev);
7708         return tp->msg_enable;
7709 }
7710   
7711 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7712 {
7713         struct tg3 *tp = netdev_priv(dev);
7714         tp->msg_enable = value;
7715 }
7716   
7717 #if TG3_TSO_SUPPORT != 0
7718 static int tg3_set_tso(struct net_device *dev, u32 value)
7719 {
7720         struct tg3 *tp = netdev_priv(dev);
7721
7722         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7723                 if (value)
7724                         return -EINVAL;
7725                 return 0;
7726         }
7727         return ethtool_op_set_tso(dev, value);
7728 }
7729 #endif
7730   
7731 static int tg3_nway_reset(struct net_device *dev)
7732 {
7733         struct tg3 *tp = netdev_priv(dev);
7734         u32 bmcr;
7735         int r;
7736   
7737         if (!netif_running(dev))
7738                 return -EAGAIN;
7739
7740         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7741                 return -EINVAL;
7742
7743         spin_lock_bh(&tp->lock);
7744         r = -EINVAL;
7745         tg3_readphy(tp, MII_BMCR, &bmcr);
7746         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7747             ((bmcr & BMCR_ANENABLE) ||
7748              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7749                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7750                                            BMCR_ANENABLE);
7751                 r = 0;
7752         }
7753         spin_unlock_bh(&tp->lock);
7754   
7755         return r;
7756 }
7757   
7758 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7759 {
7760         struct tg3 *tp = netdev_priv(dev);
7761   
7762         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7763         ering->rx_mini_max_pending = 0;
7764         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7765                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7766         else
7767                 ering->rx_jumbo_max_pending = 0;
7768
7769         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7770
7771         ering->rx_pending = tp->rx_pending;
7772         ering->rx_mini_pending = 0;
7773         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7774                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7775         else
7776                 ering->rx_jumbo_pending = 0;
7777
7778         ering->tx_pending = tp->tx_pending;
7779 }
7780   
7781 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7782 {
7783         struct tg3 *tp = netdev_priv(dev);
7784         int irq_sync = 0;
7785   
7786         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7787             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7788             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7789                 return -EINVAL;
7790   
7791         if (netif_running(dev)) {
7792                 tg3_netif_stop(tp);
7793                 irq_sync = 1;
7794         }
7795
7796         tg3_full_lock(tp, irq_sync);
7797   
7798         tp->rx_pending = ering->rx_pending;
7799
7800         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7801             tp->rx_pending > 63)
7802                 tp->rx_pending = 63;
7803         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7804         tp->tx_pending = ering->tx_pending;
7805
7806         if (netif_running(dev)) {
7807                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7808                 tg3_init_hw(tp);
7809                 tg3_netif_start(tp);
7810         }
7811
7812         tg3_full_unlock(tp);
7813   
7814         return 0;
7815 }
7816   
7817 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7818 {
7819         struct tg3 *tp = netdev_priv(dev);
7820   
7821         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7822         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7823         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7824 }
7825   
7826 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7827 {
7828         struct tg3 *tp = netdev_priv(dev);
7829         int irq_sync = 0;
7830   
7831         if (netif_running(dev)) {
7832                 tg3_netif_stop(tp);
7833                 irq_sync = 1;
7834         }
7835
7836         tg3_full_lock(tp, irq_sync);
7837
7838         if (epause->autoneg)
7839                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7840         else
7841                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7842         if (epause->rx_pause)
7843                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7844         else
7845                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7846         if (epause->tx_pause)
7847                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7848         else
7849                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7850
7851         if (netif_running(dev)) {
7852                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7853                 tg3_init_hw(tp);
7854                 tg3_netif_start(tp);
7855         }
7856
7857         tg3_full_unlock(tp);
7858   
7859         return 0;
7860 }
7861   
7862 static u32 tg3_get_rx_csum(struct net_device *dev)
7863 {
7864         struct tg3 *tp = netdev_priv(dev);
7865         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7866 }
7867   
7868 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7869 {
7870         struct tg3 *tp = netdev_priv(dev);
7871   
7872         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7873                 if (data != 0)
7874                         return -EINVAL;
7875                 return 0;
7876         }
7877   
7878         spin_lock_bh(&tp->lock);
7879         if (data)
7880                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7881         else
7882                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7883         spin_unlock_bh(&tp->lock);
7884   
7885         return 0;
7886 }
7887   
7888 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7889 {
7890         struct tg3 *tp = netdev_priv(dev);
7891   
7892         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7893                 if (data != 0)
7894                         return -EINVAL;
7895                 return 0;
7896         }
7897   
7898         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7899                 ethtool_op_set_tx_hw_csum(dev, data);
7900         else
7901                 ethtool_op_set_tx_csum(dev, data);
7902
7903         return 0;
7904 }
7905
7906 static int tg3_get_stats_count (struct net_device *dev)
7907 {
7908         return TG3_NUM_STATS;
7909 }
7910
7911 static int tg3_get_test_count (struct net_device *dev)
7912 {
7913         return TG3_NUM_TEST;
7914 }
7915
7916 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7917 {
7918         switch (stringset) {
7919         case ETH_SS_STATS:
7920                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7921                 break;
7922         case ETH_SS_TEST:
7923                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7924                 break;
7925         default:
7926                 WARN_ON(1);     /* we need a WARN() */
7927                 break;
7928         }
7929 }
7930
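     /* ethtool LED identify helper: override MAC_LED_CTRL so the link and
      * traffic LEDs blink every 500ms for roughly 'data' seconds (default 2),
      * then restore the saved LED configuration from tp->led_ctrl.
      */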
7931 static int tg3_phys_id(struct net_device *dev, u32 data)
7932 {
7933         struct tg3 *tp = netdev_priv(dev);
7934         int i;
7935
7936         if (!netif_running(tp->dev))
7937                 return -EAGAIN;
7938
7939         if (data == 0)
7940                 data = 2;
7941
7942         for (i = 0; i < (data * 2); i++) {
7943                 if ((i % 2) == 0)
7944                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7945                                            LED_CTRL_1000MBPS_ON |
7946                                            LED_CTRL_100MBPS_ON |
7947                                            LED_CTRL_10MBPS_ON |
7948                                            LED_CTRL_TRAFFIC_OVERRIDE |
7949                                            LED_CTRL_TRAFFIC_BLINK |
7950                                            LED_CTRL_TRAFFIC_LED);
7952                 else
7953                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7954                                            LED_CTRL_TRAFFIC_OVERRIDE);
7955
7956                 if (msleep_interruptible(500))
7957                         break;
7958         }
7959         tw32(MAC_LED_CTRL, tp->led_ctrl);
7960         return 0;
7961 }
7962
7963 static void tg3_get_ethtool_stats (struct net_device *dev,
7964                                    struct ethtool_stats *estats, u64 *tmp_stats)
7965 {
7966         struct tg3 *tp = netdev_priv(dev);
7967         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7968 }
7969
7970 #define NVRAM_TEST_SIZE 0x100
7971 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
7972
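     /* Validate the NVRAM image.  Legacy images are checked against a CRC of
      * the first 0x10 bytes stored at offset 0x10 and a CRC of the 0x88-byte
      * manufacturing block (starting at 0x74) stored at offset 0xfc.
      * Selfboot images (magic 0xa5xxxxxx) only need a zero byte-wise sum.
      */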
7973 static int tg3_test_nvram(struct tg3 *tp)
7974 {
7975         u32 *buf, csum, magic;
7976         int i, j, err = 0, size;
7977
7978         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
7979                 return -EIO;
7980
7981         if (magic == TG3_EEPROM_MAGIC)
7982                 size = NVRAM_TEST_SIZE;
7983         else if ((magic & 0xff000000) == 0xa5000000) {
7984                 if ((magic & 0xe00000) == 0x200000)
7985                         size = NVRAM_SELFBOOT_FORMAT1_SIZE;
7986                 else
7987                         return 0;
7988         } else
7989                 return -EIO;
7990
7991         buf = kmalloc(size, GFP_KERNEL);
7992         if (buf == NULL)
7993                 return -ENOMEM;
7994
7995         err = -EIO;
7996         for (i = 0, j = 0; i < size; i += 4, j++) {
7997                 u32 val;
7998
7999                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8000                         break;
8001                 buf[j] = cpu_to_le32(val);
8002         }
8003         if (i < size)
8004                 goto out;
8005
8006         /* Selfboot format */
8007         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
8008                 u8 *buf8 = (u8 *) buf, csum8 = 0;
8009
8010                 for (i = 0; i < size; i++)
8011                         csum8 += buf8[i];
8012
8013                 if (csum8 == 0) {
8014                         err = 0;
8015                         goto out;
                     }
                     err = -EIO;
                     goto out;
8016         }
8017
8018         /* Bootstrap checksum at offset 0x10 */
8019         csum = calc_crc((unsigned char *) buf, 0x10);
8020         if (csum != cpu_to_le32(buf[0x10/4]))
8021                 goto out;
8022
8023         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8024         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8025         if (csum != cpu_to_le32(buf[0xfc/4]))
8026                 goto out;
8027
8028         err = 0;
8029
8030 out:
8031         kfree(buf);
8032         return err;
8033 }
8034
8035 #define TG3_SERDES_TIMEOUT_SEC  2
8036 #define TG3_COPPER_TIMEOUT_SEC  6
8037
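     /* Link test: wait up to TG3_SERDES_TIMEOUT_SEC or TG3_COPPER_TIMEOUT_SEC
      * seconds for the carrier to come up; anything else is a failure.
      */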
8038 static int tg3_test_link(struct tg3 *tp)
8039 {
8040         int i, max;
8041
8042         if (!netif_running(tp->dev))
8043                 return -ENODEV;
8044
8045         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8046                 max = TG3_SERDES_TIMEOUT_SEC;
8047         else
8048                 max = TG3_COPPER_TIMEOUT_SEC;
8049
8050         for (i = 0; i < max; i++) {
8051                 if (netif_carrier_ok(tp->dev))
8052                         return 0;
8053
8054                 if (msleep_interruptible(1000))
8055                         break;
8056         }
8057
8058         return -EIO;
8059 }
8060
8061 /* Only test the commonly used registers */
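     /* Each table entry carries a read-only mask and a read/write mask.
      * The test writes zero and then all maskable ones to the register,
      * checking that read-only bits never change and that read/write bits
      * take both values.
      */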
8062 static int tg3_test_registers(struct tg3 *tp)
8063 {
8064         int i, is_5705;
8065         u32 offset, read_mask, write_mask, val, save_val, read_val;
8066         static struct {
8067                 u16 offset;
8068                 u16 flags;
8069 #define TG3_FL_5705     0x1
8070 #define TG3_FL_NOT_5705 0x2
8071 #define TG3_FL_NOT_5788 0x4
8072                 u32 read_mask;
8073                 u32 write_mask;
8074         } reg_tbl[] = {
8075                 /* MAC Control Registers */
8076                 { MAC_MODE, TG3_FL_NOT_5705,
8077                         0x00000000, 0x00ef6f8c },
8078                 { MAC_MODE, TG3_FL_5705,
8079                         0x00000000, 0x01ef6b8c },
8080                 { MAC_STATUS, TG3_FL_NOT_5705,
8081                         0x03800107, 0x00000000 },
8082                 { MAC_STATUS, TG3_FL_5705,
8083                         0x03800100, 0x00000000 },
8084                 { MAC_ADDR_0_HIGH, 0x0000,
8085                         0x00000000, 0x0000ffff },
8086                 { MAC_ADDR_0_LOW, 0x0000,
8087                         0x00000000, 0xffffffff },
8088                 { MAC_RX_MTU_SIZE, 0x0000,
8089                         0x00000000, 0x0000ffff },
8090                 { MAC_TX_MODE, 0x0000,
8091                         0x00000000, 0x00000070 },
8092                 { MAC_TX_LENGTHS, 0x0000,
8093                         0x00000000, 0x00003fff },
8094                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8095                         0x00000000, 0x000007fc },
8096                 { MAC_RX_MODE, TG3_FL_5705,
8097                         0x00000000, 0x000007dc },
8098                 { MAC_HASH_REG_0, 0x0000,
8099                         0x00000000, 0xffffffff },
8100                 { MAC_HASH_REG_1, 0x0000,
8101                         0x00000000, 0xffffffff },
8102                 { MAC_HASH_REG_2, 0x0000,
8103                         0x00000000, 0xffffffff },
8104                 { MAC_HASH_REG_3, 0x0000,
8105                         0x00000000, 0xffffffff },
8106
8107                 /* Receive Data and Receive BD Initiator Control Registers. */
8108                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8109                         0x00000000, 0xffffffff },
8110                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8111                         0x00000000, 0xffffffff },
8112                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8113                         0x00000000, 0x00000003 },
8114                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8115                         0x00000000, 0xffffffff },
8116                 { RCVDBDI_STD_BD+0, 0x0000,
8117                         0x00000000, 0xffffffff },
8118                 { RCVDBDI_STD_BD+4, 0x0000,
8119                         0x00000000, 0xffffffff },
8120                 { RCVDBDI_STD_BD+8, 0x0000,
8121                         0x00000000, 0xffff0002 },
8122                 { RCVDBDI_STD_BD+0xc, 0x0000,
8123                         0x00000000, 0xffffffff },
8124         
8125                 /* Receive BD Initiator Control Registers. */
8126                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8127                         0x00000000, 0xffffffff },
8128                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8129                         0x00000000, 0x000003ff },
8130                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8131                         0x00000000, 0xffffffff },
8132         
8133                 /* Host Coalescing Control Registers. */
8134                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8135                         0x00000000, 0x00000004 },
8136                 { HOSTCC_MODE, TG3_FL_5705,
8137                         0x00000000, 0x000000f6 },
8138                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8139                         0x00000000, 0xffffffff },
8140                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8141                         0x00000000, 0x000003ff },
8142                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8143                         0x00000000, 0xffffffff },
8144                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8145                         0x00000000, 0x000003ff },
8146                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8147                         0x00000000, 0xffffffff },
8148                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8149                         0x00000000, 0x000000ff },
8150                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8151                         0x00000000, 0xffffffff },
8152                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8153                         0x00000000, 0x000000ff },
8154                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8155                         0x00000000, 0xffffffff },
8156                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8157                         0x00000000, 0xffffffff },
8158                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8159                         0x00000000, 0xffffffff },
8160                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8161                         0x00000000, 0x000000ff },
8162                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8163                         0x00000000, 0xffffffff },
8164                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8165                         0x00000000, 0x000000ff },
8166                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8167                         0x00000000, 0xffffffff },
8168                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8169                         0x00000000, 0xffffffff },
8170                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8171                         0x00000000, 0xffffffff },
8172                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8173                         0x00000000, 0xffffffff },
8174                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8175                         0x00000000, 0xffffffff },
8176                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8177                         0xffffffff, 0x00000000 },
8178                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8179                         0xffffffff, 0x00000000 },
8180
8181                 /* Buffer Manager Control Registers. */
8182                 { BUFMGR_MB_POOL_ADDR, 0x0000,
8183                         0x00000000, 0x007fff80 },
8184                 { BUFMGR_MB_POOL_SIZE, 0x0000,
8185                         0x00000000, 0x007fffff },
8186                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8187                         0x00000000, 0x0000003f },
8188                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8189                         0x00000000, 0x000001ff },
8190                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8191                         0x00000000, 0x000001ff },
8192                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8193                         0xffffffff, 0x00000000 },
8194                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8195                         0xffffffff, 0x00000000 },
8196         
8197                 /* Mailbox Registers */
8198                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8199                         0x00000000, 0x000001ff },
8200                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8201                         0x00000000, 0x000001ff },
8202                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8203                         0x00000000, 0x000007ff },
8204                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8205                         0x00000000, 0x000001ff },
8206
8207                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8208         };
8209
8210         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8211                 is_5705 = 1;
8212         else
8213                 is_5705 = 0;
8214
8215         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8216                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8217                         continue;
8218
8219                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8220                         continue;
8221
8222                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8223                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8224                         continue;
8225
8226                 offset = (u32) reg_tbl[i].offset;
8227                 read_mask = reg_tbl[i].read_mask;
8228                 write_mask = reg_tbl[i].write_mask;
8229
8230                 /* Save the original register content */
8231                 save_val = tr32(offset);
8232
8233                 /* Determine the read-only value. */
8234                 read_val = save_val & read_mask;
8235
8236                 /* Write zero to the register, then make sure the read-only bits
8237                  * are not changed and the read/write bits are all zeros.
8238                  */
8239                 tw32(offset, 0);
8240
8241                 val = tr32(offset);
8242
8243                 /* Test the read-only and read/write bits. */
8244                 if (((val & read_mask) != read_val) || (val & write_mask))
8245                         goto out;
8246
8247                 /* Write ones to all the bits defined by RdMask and WrMask, then
8248                  * make sure the read-only bits are not changed and the
8249                  * read/write bits are all ones.
8250                  */
8251                 tw32(offset, read_mask | write_mask);
8252
8253                 val = tr32(offset);
8254
8255                 /* Test the read-only bits. */
8256                 if ((val & read_mask) != read_val)
8257                         goto out;
8258
8259                 /* Test the read/write bits. */
8260                 if ((val & write_mask) != write_mask)
8261                         goto out;
8262
8263                 tw32(offset, save_val);
8264         }
8265
8266         return 0;
8267
8268 out:
8269         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8270         tw32(offset, save_val);
8271         return -EIO;
8272 }
8273
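     /* Write each entry of test_pattern[] across the given window of
      * NIC-internal memory and read every word back for comparison.
      */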
8274 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8275 {
8276         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8277         int i;
8278         u32 j;
8279
8280         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8281                 for (j = 0; j < len; j += 4) {
8282                         u32 val;
8283
8284                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8285                         tg3_read_mem(tp, offset + j, &val);
8286                         if (val != test_pattern[i])
8287                                 return -EIO;
8288                 }
8289         }
8290         return 0;
8291 }
8292
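     /* Run the pattern test over the internal SRAM regions listed in the
      * per-chip table; which table applies depends on the ASIC revision.
      */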
8293 static int tg3_test_memory(struct tg3 *tp)
8294 {
8295         static struct mem_entry {
8296                 u32 offset;
8297                 u32 len;
8298         } mem_tbl_570x[] = {
8299                 { 0x00000000, 0x00b50},
8300                 { 0x00002000, 0x1c000},
8301                 { 0xffffffff, 0x00000}
8302         }, mem_tbl_5705[] = {
8303                 { 0x00000100, 0x0000c},
8304                 { 0x00000200, 0x00008},
8305                 { 0x00004000, 0x00800},
8306                 { 0x00006000, 0x01000},
8307                 { 0x00008000, 0x02000},
8308                 { 0x00010000, 0x0e000},
8309                 { 0xffffffff, 0x00000}
8310         }, mem_tbl_5755[] = {
8311                 { 0x00000200, 0x00008},
8312                 { 0x00004000, 0x00800},
8313                 { 0x00006000, 0x00800},
8314                 { 0x00008000, 0x02000},
8315                 { 0x00010000, 0x0c000},
8316                 { 0xffffffff, 0x00000}
8317         };
8318         struct mem_entry *mem_tbl;
8319         int err = 0;
8320         int i;
8321
8322         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8323                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8324                         mem_tbl = mem_tbl_5755;
8325                 else
8326                         mem_tbl = mem_tbl_5705;
8327         } else
8328                 mem_tbl = mem_tbl_570x;
8329
8330         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8331                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8332                     mem_tbl[i].len)) != 0)
8333                         break;
8334         }
8335         
8336         return err;
8337 }
8338
8339 #define TG3_MAC_LOOPBACK        0
8340 #define TG3_PHY_LOOPBACK        1
8341
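     /* Send one packet through MAC-internal or PHY loopback: build a
      * 1514-byte frame addressed to our own MAC with a counting byte
      * pattern, post a single TX descriptor, poll the status block for
      * completion, then compare the received payload byte for byte.
      */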
8342 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8343 {
8344         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8345         u32 desc_idx;
8346         struct sk_buff *skb, *rx_skb;
8347         u8 *tx_data;
8348         dma_addr_t map;
8349         int num_pkts, tx_len, rx_len, i, err;
8350         struct tg3_rx_buffer_desc *desc;
8351
8352         if (loopback_mode == TG3_MAC_LOOPBACK) {
8353                 /* HW errata - mac loopback fails in some cases on 5780.
8354                  * Normal traffic and PHY loopback are not affected by
8355                  * errata.
8356                  */
8357                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8358                         return 0;
8359
8360                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8361                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8362                            MAC_MODE_PORT_MODE_GMII;
8363                 tw32(MAC_MODE, mac_mode);
8364         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8365                 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8366                                            BMCR_SPEED1000);
8367                 udelay(40);
8368                 /* reset to prevent losing 1st rx packet intermittently */
8369                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8370                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8371                         udelay(10);
8372                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8373                 }
8374                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8375                            MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
8376                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
8377                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8378                 tw32(MAC_MODE, mac_mode);
8379         }
8380         else
8381                 return -EINVAL;
8382
8383         err = -EIO;
8384
8385         tx_len = 1514;
8386         skb = dev_alloc_skb(tx_len);
             if (!skb)
                     return -ENOMEM;
8387         tx_data = skb_put(skb, tx_len);
8388         memcpy(tx_data, tp->dev->dev_addr, 6);
8389         memset(tx_data + 6, 0x0, 8);
8390
8391         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8392
8393         for (i = 14; i < tx_len; i++)
8394                 tx_data[i] = (u8) (i & 0xff);
8395
8396         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8397
8398         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8399              HOSTCC_MODE_NOW);
8400
8401         udelay(10);
8402
8403         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8404
8405         num_pkts = 0;
8406
8407         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8408
8409         tp->tx_prod++;
8410         num_pkts++;
8411
8412         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8413                      tp->tx_prod);
8414         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8415
8416         udelay(10);
8417
8418         for (i = 0; i < 10; i++) {
8419                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8420                        HOSTCC_MODE_NOW);
8421
8422                 udelay(10);
8423
8424                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8425                 rx_idx = tp->hw_status->idx[0].rx_producer;
8426                 if ((tx_idx == tp->tx_prod) &&
8427                     (rx_idx == (rx_start_idx + num_pkts)))
8428                         break;
8429         }
8430
8431         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8432         dev_kfree_skb(skb);
8433
8434         if (tx_idx != tp->tx_prod)
8435                 goto out;
8436
8437         if (rx_idx != rx_start_idx + num_pkts)
8438                 goto out;
8439
8440         desc = &tp->rx_rcb[rx_start_idx];
8441         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8442         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8443         if (opaque_key != RXD_OPAQUE_RING_STD)
8444                 goto out;
8445
8446         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8447             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8448                 goto out;
8449
8450         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8451         if (rx_len != tx_len)
8452                 goto out;
8453
8454         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8455
8456         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8457         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8458
8459         for (i = 14; i < tx_len; i++) {
8460                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8461                         goto out;
8462         }
8463         err = 0;
8464         
8465         /* tg3_free_rings will unmap and free the rx_skb */
8466 out:
8467         return err;
8468 }
8469
8470 #define TG3_MAC_LOOPBACK_FAILED         1
8471 #define TG3_PHY_LOOPBACK_FAILED         2
8472 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8473                                          TG3_PHY_LOOPBACK_FAILED)
8474
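     /* Reset the hardware and run both loopback flavours, returning a
      * bitmask of TG3_MAC_LOOPBACK_FAILED / TG3_PHY_LOOPBACK_FAILED;
      * PHY loopback is skipped on serdes devices.
      */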
8475 static int tg3_test_loopback(struct tg3 *tp)
8476 {
8477         int err = 0;
8478
8479         if (!netif_running(tp->dev))
8480                 return TG3_LOOPBACK_FAILED;
8481
8482         tg3_reset_hw(tp);
8483
8484         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8485                 err |= TG3_MAC_LOOPBACK_FAILED;
8486         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8487                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8488                         err |= TG3_PHY_LOOPBACK_FAILED;
8489         }
8490
8491         return err;
8492 }
8493
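     /* ethtool self-test entry point.  Results land in data[]: 0 = NVRAM,
      * 1 = link, 2 = registers, 3 = memory, 4 = loopback bitmask,
      * 5 = interrupt.  The offline tests halt and reinitialize the chip.
      */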
8494 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8495                           u64 *data)
8496 {
8497         struct tg3 *tp = netdev_priv(dev);
8498
8499         if (tp->link_config.phy_is_low_power)
8500                 tg3_set_power_state(tp, PCI_D0);
8501
8502         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8503
8504         if (tg3_test_nvram(tp) != 0) {
8505                 etest->flags |= ETH_TEST_FL_FAILED;
8506                 data[0] = 1;
8507         }
8508         if (tg3_test_link(tp) != 0) {
8509                 etest->flags |= ETH_TEST_FL_FAILED;
8510                 data[1] = 1;
8511         }
8512         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8513                 int err, irq_sync = 0;
8514
8515                 if (netif_running(dev)) {
8516                         tg3_netif_stop(tp);
8517                         irq_sync = 1;
8518                 }
8519
8520                 tg3_full_lock(tp, irq_sync);
8521
8522                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8523                 err = tg3_nvram_lock(tp);
8524                 tg3_halt_cpu(tp, RX_CPU_BASE);
8525                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8526                         tg3_halt_cpu(tp, TX_CPU_BASE);
8527                 if (!err)
8528                         tg3_nvram_unlock(tp);
8529
8530                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8531                         tg3_phy_reset(tp);
8532
8533                 if (tg3_test_registers(tp) != 0) {
8534                         etest->flags |= ETH_TEST_FL_FAILED;
8535                         data[2] = 1;
8536                 }
8537                 if (tg3_test_memory(tp) != 0) {
8538                         etest->flags |= ETH_TEST_FL_FAILED;
8539                         data[3] = 1;
8540                 }
8541                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8542                         etest->flags |= ETH_TEST_FL_FAILED;
8543
8544                 tg3_full_unlock(tp);
8545
8546                 if (tg3_test_interrupt(tp) != 0) {
8547                         etest->flags |= ETH_TEST_FL_FAILED;
8548                         data[5] = 1;
8549                 }
8550
8551                 tg3_full_lock(tp, 0);
8552
8553                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8554                 if (netif_running(dev)) {
8555                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8556                         tg3_init_hw(tp);
8557                         tg3_netif_start(tp);
8558                 }
8559
8560                 tg3_full_unlock(tp);
8561         }
8562         if (tp->link_config.phy_is_low_power)
8563                 tg3_set_power_state(tp, PCI_D3hot);
8564
8565 }
8566
8567 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8568 {
8569         struct mii_ioctl_data *data = if_mii(ifr);
8570         struct tg3 *tp = netdev_priv(dev);
8571         int err;
8572
8573         switch(cmd) {
8574         case SIOCGMIIPHY:
8575                 data->phy_id = PHY_ADDR;
8576
8577                 /* fallthru */
8578         case SIOCGMIIREG: {
8579                 u32 mii_regval;
8580
8581                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8582                         break;                  /* We have no PHY */
8583
8584                 if (tp->link_config.phy_is_low_power)
8585                         return -EAGAIN;
8586
8587                 spin_lock_bh(&tp->lock);
8588                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8589                 spin_unlock_bh(&tp->lock);
8590
8591                 data->val_out = mii_regval;
8592
8593                 return err;
8594         }
8595
8596         case SIOCSMIIREG:
8597                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8598                         break;                  /* We have no PHY */
8599
8600                 if (!capable(CAP_NET_ADMIN))
8601                         return -EPERM;
8602
8603                 if (tp->link_config.phy_is_low_power)
8604                         return -EAGAIN;
8605
8606                 spin_lock_bh(&tp->lock);
8607                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8608                 spin_unlock_bh(&tp->lock);
8609
8610                 return err;
8611
8612         default:
8613                 /* do nothing */
8614                 break;
8615         }
8616         return -EOPNOTSUPP;
8617 }
8618
8619 #if TG3_VLAN_TAG_USED
8620 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8621 {
8622         struct tg3 *tp = netdev_priv(dev);
8623
8624         tg3_full_lock(tp, 0);
8625
8626         tp->vlgrp = grp;
8627
8628         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8629         __tg3_set_rx_mode(dev);
8630
8631         tg3_full_unlock(tp);
8632 }
8633
8634 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8635 {
8636         struct tg3 *tp = netdev_priv(dev);
8637
8638         tg3_full_lock(tp, 0);
8639         if (tp->vlgrp)
8640                 tp->vlgrp->vlan_devices[vid] = NULL;
8641         tg3_full_unlock(tp);
8642 }
8643 #endif
8644
8645 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8646 {
8647         struct tg3 *tp = netdev_priv(dev);
8648
8649         memcpy(ec, &tp->coal, sizeof(*ec));
8650         return 0;
8651 }
8652
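     /* Bounds-check the requested coalescing parameters against the hardware
      * limits.  On 5705 and newer chips the *_coalesce_usecs_irq and
      * statistics-block limits are left at zero, so any non-zero request for
      * those fields is rejected.
      */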
8653 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8654 {
8655         struct tg3 *tp = netdev_priv(dev);
8656         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8657         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8658
8659         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8660                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8661                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8662                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8663                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8664         }
8665
8666         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8667             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8668             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8669             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8670             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8671             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8672             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8673             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8674             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8675             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8676                 return -EINVAL;
8677
8678         /* No rx interrupts will be generated if both are zero */
8679         if ((ec->rx_coalesce_usecs == 0) &&
8680             (ec->rx_max_coalesced_frames == 0))
8681                 return -EINVAL;
8682
8683         /* No tx interrupts will be generated if both are zero */
8684         if ((ec->tx_coalesce_usecs == 0) &&
8685             (ec->tx_max_coalesced_frames == 0))
8686                 return -EINVAL;
8687
8688         /* Only copy relevant parameters, ignore all others. */
8689         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8690         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8691         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8692         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8693         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8694         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8695         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8696         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8697         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8698
8699         if (netif_running(dev)) {
8700                 tg3_full_lock(tp, 0);
8701                 __tg3_set_coalesce(tp, &tp->coal);
8702                 tg3_full_unlock(tp);
8703         }
8704         return 0;
8705 }
8706
8707 static struct ethtool_ops tg3_ethtool_ops = {
8708         .get_settings           = tg3_get_settings,
8709         .set_settings           = tg3_set_settings,
8710         .get_drvinfo            = tg3_get_drvinfo,
8711         .get_regs_len           = tg3_get_regs_len,
8712         .get_regs               = tg3_get_regs,
8713         .get_wol                = tg3_get_wol,
8714         .set_wol                = tg3_set_wol,
8715         .get_msglevel           = tg3_get_msglevel,
8716         .set_msglevel           = tg3_set_msglevel,
8717         .nway_reset             = tg3_nway_reset,
8718         .get_link               = ethtool_op_get_link,
8719         .get_eeprom_len         = tg3_get_eeprom_len,
8720         .get_eeprom             = tg3_get_eeprom,
8721         .set_eeprom             = tg3_set_eeprom,
8722         .get_ringparam          = tg3_get_ringparam,
8723         .set_ringparam          = tg3_set_ringparam,
8724         .get_pauseparam         = tg3_get_pauseparam,
8725         .set_pauseparam         = tg3_set_pauseparam,
8726         .get_rx_csum            = tg3_get_rx_csum,
8727         .set_rx_csum            = tg3_set_rx_csum,
8728         .get_tx_csum            = ethtool_op_get_tx_csum,
8729         .set_tx_csum            = tg3_set_tx_csum,
8730         .get_sg                 = ethtool_op_get_sg,
8731         .set_sg                 = ethtool_op_set_sg,
8732 #if TG3_TSO_SUPPORT != 0
8733         .get_tso                = ethtool_op_get_tso,
8734         .set_tso                = tg3_set_tso,
8735 #endif
8736         .self_test_count        = tg3_get_test_count,
8737         .self_test              = tg3_self_test,
8738         .get_strings            = tg3_get_strings,
8739         .phys_id                = tg3_phys_id,
8740         .get_stats_count        = tg3_get_stats_count,
8741         .get_ethtool_stats      = tg3_get_ethtool_stats,
8742         .get_coalesce           = tg3_get_coalesce,
8743         .set_coalesce           = tg3_set_coalesce,
8744         .get_perm_addr          = ethtool_op_get_perm_addr,
8745 };
8746
8747 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8748 {
8749         u32 cursize, val, magic;
8750
8751         tp->nvram_size = EEPROM_CHIP_SIZE;
8752
8753         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8754                 return;
8755
8756         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
8757                 return;
8758
8759         /*
8760          * Size the chip by reading offsets at increasing powers of two.
8761          * When we encounter our validation signature, we know the addressing
8762          * has wrapped around, and thus have our chip size.
8763          */
8764         cursize = 0x10;
8765
8766         while (cursize < tp->nvram_size) {
8767                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
8768                         return;
8769
8770                 if (val == magic)
8771                         break;
8772
8773                 cursize <<= 1;
8774         }
8775
8776         tp->nvram_size = cursize;
8777 }
8778                 
8779 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8780 {
8781         u32 val;
8782
8783         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
8784                 return;
8785
8786         /* Selfboot format */
8787         if (val != TG3_EEPROM_MAGIC) {
8788                 tg3_get_eeprom_size(tp);
8789                 return;
8790         }
8791
8792         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8793                 if (val != 0) {
8794                         tp->nvram_size = (val >> 16) * 1024;
8795                         return;
8796                 }
8797         }
8798         tp->nvram_size = 0x20000;
8799 }
8800
8801 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8802 {
8803         u32 nvcfg1;
8804
8805         nvcfg1 = tr32(NVRAM_CFG1);
8806         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8807                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8808         }
8809         else {
8810                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8811                 tw32(NVRAM_CFG1, nvcfg1);
8812         }
8813
8814         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8815             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8816                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8817                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8818                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8819                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8820                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8821                                 break;
8822                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8823                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8824                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8825                                 break;
8826                         case FLASH_VENDOR_ATMEL_EEPROM:
8827                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8828                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8829                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8830                                 break;
8831                         case FLASH_VENDOR_ST:
8832                                 tp->nvram_jedecnum = JEDEC_ST;
8833                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8834                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8835                                 break;
8836                         case FLASH_VENDOR_SAIFUN:
8837                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8838                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8839                                 break;
8840                         case FLASH_VENDOR_SST_SMALL:
8841                         case FLASH_VENDOR_SST_LARGE:
8842                                 tp->nvram_jedecnum = JEDEC_SST;
8843                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8844                                 break;
8845                 }
8846         }
8847         else {
8848                 tp->nvram_jedecnum = JEDEC_ATMEL;
8849                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8850                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8851         }
8852 }
8853
8854 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8855 {
8856         u32 nvcfg1;
8857
8858         nvcfg1 = tr32(NVRAM_CFG1);
8859
8860         /* NVRAM protection for TPM */
8861         if (nvcfg1 & (1 << 27))
8862                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8863
8864         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8865                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8866                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8867                         tp->nvram_jedecnum = JEDEC_ATMEL;
8868                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8869                         break;
8870                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8871                         tp->nvram_jedecnum = JEDEC_ATMEL;
8872                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8873                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8874                         break;
8875                 case FLASH_5752VENDOR_ST_M45PE10:
8876                 case FLASH_5752VENDOR_ST_M45PE20:
8877                 case FLASH_5752VENDOR_ST_M45PE40:
8878                         tp->nvram_jedecnum = JEDEC_ST;
8879                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8880                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8881                         break;
8882         }
8883
8884         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8885                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8886                         case FLASH_5752PAGE_SIZE_256:
8887                                 tp->nvram_pagesize = 256;
8888                                 break;
8889                         case FLASH_5752PAGE_SIZE_512:
8890                                 tp->nvram_pagesize = 512;
8891                                 break;
8892                         case FLASH_5752PAGE_SIZE_1K:
8893                                 tp->nvram_pagesize = 1024;
8894                                 break;
8895                         case FLASH_5752PAGE_SIZE_2K:
8896                                 tp->nvram_pagesize = 2048;
8897                                 break;
8898                         case FLASH_5752PAGE_SIZE_4K:
8899                                 tp->nvram_pagesize = 4096;
8900                                 break;
8901                         case FLASH_5752PAGE_SIZE_264:
8902                                 tp->nvram_pagesize = 264;
8903                                 break;
8904                 }
8905         }
8906         else {
8907                 /* For eeprom, set pagesize to maximum eeprom size */
8908                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8909
8910                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8911                 tw32(NVRAM_CFG1, nvcfg1);
8912         }
8913 }
8914
8915 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
8916 {
8917         u32 nvcfg1;
8918
8919         nvcfg1 = tr32(NVRAM_CFG1);
8920
8921         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8922                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
8923                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
8924                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
8925                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
8926                         tp->nvram_jedecnum = JEDEC_ATMEL;
8927                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8928                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8929
8930                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8931                         tw32(NVRAM_CFG1, nvcfg1);
8932                         break;
8933                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8934                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
8935                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
8936                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
8937                         tp->nvram_jedecnum = JEDEC_ATMEL;
8938                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8939                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8940                         tp->nvram_pagesize = 264;
8941                         break;
8942                 case FLASH_5752VENDOR_ST_M45PE10:
8943                 case FLASH_5752VENDOR_ST_M45PE20:
8944                 case FLASH_5752VENDOR_ST_M45PE40:
8945                         tp->nvram_jedecnum = JEDEC_ST;
8946                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8947                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8948                         tp->nvram_pagesize = 256;
8949                         break;
8950         }
8951 }
8952
8953 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
8954 static void __devinit tg3_nvram_init(struct tg3 *tp)
8955 {
8956         int j;
8957
8958         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
8959                 return;
8960
8961         tw32_f(GRC_EEPROM_ADDR,
8962              (EEPROM_ADDR_FSM_RESET |
8963               (EEPROM_DEFAULT_CLOCK_PERIOD <<
8964                EEPROM_ADDR_CLKPERD_SHIFT)));
8965
8966         /* XXX schedule_timeout() ... */
8967         for (j = 0; j < 100; j++)
8968                 udelay(10);
8969
8970         /* Enable seeprom accesses. */
8971         tw32_f(GRC_LOCAL_CTRL,
8972              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
8973         udelay(100);
8974
8975         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8976             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
8977                 tp->tg3_flags |= TG3_FLAG_NVRAM;
8978
8979                 if (tg3_nvram_lock(tp)) {
8980                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
8981                                "tg3_nvram_init failed.\n", tp->dev->name);
8982                         return;
8983                 }
8984                 tg3_enable_nvram_access(tp);
8985
8986                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8987                         tg3_get_5752_nvram_info(tp);
8988                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8989                         tg3_get_5787_nvram_info(tp);
8990                 else
8991                         tg3_get_nvram_info(tp);
8992
8993                 tg3_get_nvram_size(tp);
8994
8995                 tg3_disable_nvram_access(tp);
8996                 tg3_nvram_unlock(tp);
8997
8998         } else {
8999                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9000
9001                 tg3_get_eeprom_size(tp);
9002         }
9003 }
9004
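     /* On parts without NVRAM support, read one 32-bit word through the GRC
      * EEPROM interface: program the address, start the cycle, and poll up
      * to 10000 * 100us for EEPROM_ADDR_COMPLETE.
      */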
9005 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9006                                         u32 offset, u32 *val)
9007 {
9008         u32 tmp;
9009         int i;
9010
9011         if (offset > EEPROM_ADDR_ADDR_MASK ||
9012             (offset % 4) != 0)
9013                 return -EINVAL;
9014
9015         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9016                                         EEPROM_ADDR_DEVID_MASK |
9017                                         EEPROM_ADDR_READ);
9018         tw32(GRC_EEPROM_ADDR,
9019              tmp |
9020              (0 << EEPROM_ADDR_DEVID_SHIFT) |
9021              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9022               EEPROM_ADDR_ADDR_MASK) |
9023              EEPROM_ADDR_READ | EEPROM_ADDR_START);
9024
9025         for (i = 0; i < 10000; i++) {
9026                 tmp = tr32(GRC_EEPROM_ADDR);
9027
9028                 if (tmp & EEPROM_ADDR_COMPLETE)
9029                         break;
9030                 udelay(100);
9031         }
9032         if (!(tmp & EEPROM_ADDR_COMPLETE))
9033                 return -EBUSY;
9034
9035         *val = tr32(GRC_EEPROM_DATA);
9036         return 0;
9037 }
9038
9039 #define NVRAM_CMD_TIMEOUT 10000
9040
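     /* Issue an NVRAM command and poll up to NVRAM_CMD_TIMEOUT * 10us
      * (about 100ms) for the controller to report NVRAM_CMD_DONE.
      */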
9041 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9042 {
9043         int i;
9044
9045         tw32(NVRAM_CMD, nvram_cmd);
9046         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9047                 udelay(10);
9048                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9049                         udelay(10);
9050                         break;
9051                 }
9052         }
9053         if (i == NVRAM_CMD_TIMEOUT) {
9054                 return -EBUSY;
9055         }
9056         return 0;
9057 }
9058
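     /* Buffered Atmel flash (AT45DB0x1B) is not byte-linear: the device
      * expects (page number << ATMEL_AT45DB0X1B_PAGE_POS) plus the offset
      * within the page.  For example, assuming the usual 264-byte page and
      * a 9-bit in-page field, linear offset 264 (start of page 1) maps to
      * (1 << 9) + 0 = 0x200.
      */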
9059 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9060 {
9061         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9062             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9063             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9064             (tp->nvram_jedecnum == JEDEC_ATMEL))
9065
9066                 addr = ((addr / tp->nvram_pagesize) <<
9067                         ATMEL_AT45DB0X1B_PAGE_POS) +
9068                        (addr % tp->nvram_pagesize);
9069
9070         return addr;
9071 }
9072
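     /* Inverse of tg3_nvram_phys_addr(): fold a paged AT45DB0x1B address
      * back into the linear byte offset used by the rest of the driver.
      */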
9073 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9074 {
9075         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9076             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9077             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9078             (tp->nvram_jedecnum == JEDEC_ATMEL))
9079
9080                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9081                         tp->nvram_pagesize) +
9082                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9083
9084         return addr;
9085 }
9086
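     /* Read one 32-bit word of NVRAM at a linear byte offset: translate to
      * the flash's physical addressing, take the NVRAM arbitration lock,
      * issue the read command, and return the byte-swapped data word.
      * Falls back to the GRC EEPROM interface on parts without NVRAM.
      */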
9087 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9088 {
9089         int ret;
9090
9091         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9092                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
9093                 return -EINVAL;
9094         }
9095
9096         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9097                 return tg3_nvram_read_using_eeprom(tp, offset, val);
9098
9099         offset = tg3_nvram_phys_addr(tp, offset);
9100
9101         if (offset > NVRAM_ADDR_MSK)
9102                 return -EINVAL;
9103
9104         ret = tg3_nvram_lock(tp);
9105         if (ret)
9106                 return ret;
9107
9108         tg3_enable_nvram_access(tp);
9109
9110         tw32(NVRAM_ADDR, offset);
9111         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9112                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9113
9114         if (ret == 0)
9115                 *val = swab32(tr32(NVRAM_RDDATA));
9116
9117         tg3_disable_nvram_access(tp);
9118
9119         tg3_nvram_unlock(tp);
9120
9121         return ret;
9122 }
9123
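     /* Same as tg3_nvram_read() but swaps the result once more, which is
      * the byte order the EEPROM magic-number checks expect.
      */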
9124 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9125 {
9126         int err;
9127         u32 tmp;
9128
9129         err = tg3_nvram_read(tp, offset, &tmp);
9130         *val = swab32(tmp);
9131         return err;
9132 }
9133
9134 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9135                                     u32 offset, u32 len, u8 *buf)
9136 {
9137         int i, j, rc = 0;
9138         u32 val;
9139
9140         for (i = 0; i < len; i += 4) {
9141                 u32 addr, data;
9142
9143                 addr = offset + i;
9144
9145                 memcpy(&data, buf + i, 4);
9146
9147                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9148
9149                 val = tr32(GRC_EEPROM_ADDR);
9150                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9151
9152                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9153                         EEPROM_ADDR_READ);
9154                 tw32(GRC_EEPROM_ADDR, val |
9155                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
9156                         (addr & EEPROM_ADDR_ADDR_MASK) |
9157                         EEPROM_ADDR_START |
9158                         EEPROM_ADDR_WRITE);
9159
9160                 for (j = 0; j < 10000; j++) {
9161                         val = tr32(GRC_EEPROM_ADDR);
9162
9163                         if (val & EEPROM_ADDR_COMPLETE)
9164                                 break;
9165                         udelay(100);
9166                 }
9167                 if (!(val & EEPROM_ADDR_COMPLETE)) {
9168                         rc = -EBUSY;
9169                         break;
9170                 }
9171         }
9172
9173         return rc;
9174 }
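/* The loop above is the write-side counterpart of
 * tg3_nvram_read_using_eeprom(): each dword is placed in GRC_EEPROM_DATA,
 * a write cycle is started through GRC_EEPROM_ADDR with EEPROM_ADDR_START |
 * EEPROM_ADDR_WRITE, and the driver then polls EEPROM_ADDR_COMPLETE for up
 * to 10000 * 100 usec (about one second) per dword before failing with
 * -EBUSY.
 */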
9175
9176 /* offset and length are dword aligned */
9177 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9178                 u8 *buf)
9179 {
9180         int ret = 0;
9181         u32 pagesize = tp->nvram_pagesize;
9182         u32 pagemask = pagesize - 1;
9183         u32 nvram_cmd;
9184         u8 *tmp;
9185
9186         tmp = kmalloc(pagesize, GFP_KERNEL);
9187         if (tmp == NULL)
9188                 return -ENOMEM;
9189
9190         while (len) {
9191                 int j;
9192                 u32 phy_addr, page_off, size;
9193
9194                 phy_addr = offset & ~pagemask;
9195
9196                 for (j = 0; j < pagesize; j += 4) {
9197                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9198                                                 (u32 *) (tmp + j))))
9199                                 break;
9200                 }
9201                 if (ret)
9202                         break;
9203
9204                 page_off = offset & pagemask;
9205                 size = pagesize;
9206                 if (len < size)
9207                         size = len;
9208
9209                 len -= size;
9210
9211                 memcpy(tmp + page_off, buf, size);
9212
9213                 offset = offset + (pagesize - page_off);
9214
9215                 tg3_enable_nvram_access(tp);
9216
9217                 /*
9218                  * Before we can erase the flash page, we need
9219                  * to issue a special "write enable" command.
9220                  */
9221                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9222
9223                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9224                         break;
9225
9226                 /* Erase the target page */
9227                 tw32(NVRAM_ADDR, phy_addr);
9228
9229                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9230                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9231
9232                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9233                         break;
9234
9235                 /* Issue another write enable to start the write. */
9236                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9237
9238                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9239                         break;
9240
9241                 for (j = 0; j < pagesize; j += 4) {
9242                         u32 data;
9243
9244                         data = *((u32 *) (tmp + j));
9245                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9246
9247                         tw32(NVRAM_ADDR, phy_addr + j);
9248
9249                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9250                                 NVRAM_CMD_WR;
9251
9252                         if (j == 0)
9253                                 nvram_cmd |= NVRAM_CMD_FIRST;
9254                         else if (j == (pagesize - 4))
9255                                 nvram_cmd |= NVRAM_CMD_LAST;
9256
9257                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9258                                 break;
9259                 }
9260                 if (ret)
9261                         break;
9262         }
9263
9264         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9265         tg3_nvram_exec_cmd(tp, nvram_cmd);
9266
9267         kfree(tmp);
9268
9269         return ret;
9270 }
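/* For unbuffered flash the driver rewrites whole pages rather than single
 * dwords.  The loop above does a read-modify-write of every page it
 * touches:
 *
 *	1. read the whole page into the scratch buffer,
 *	2. merge in the caller's data,
 *	3. issue NVRAM_CMD_WREN and erase the page,
 *	4. issue NVRAM_CMD_WREN again and program the page dword by dword,
 *	   tagging the first and last dwords with NVRAM_CMD_FIRST/LAST,
 *	5. finish with NVRAM_CMD_WRDI to drop write enable.
 */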
9271
9272 /* offset and length are dword aligned */
9273 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9274                 u8 *buf)
9275 {
9276         int i, ret = 0;
9277
9278         for (i = 0; i < len; i += 4, offset += 4) {
9279                 u32 data, page_off, phy_addr, nvram_cmd;
9280
9281                 memcpy(&data, buf + i, 4);
9282                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9283
9284                 page_off = offset % tp->nvram_pagesize;
9285
9286                 phy_addr = tg3_nvram_phys_addr(tp, offset);
9287
9288                 tw32(NVRAM_ADDR, phy_addr);
9289
9290                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9291
9292                 if ((page_off == 0) || (i == 0))
9293                         nvram_cmd |= NVRAM_CMD_FIRST;
9294                 else if (page_off == (tp->nvram_pagesize - 4))
9295                         nvram_cmd |= NVRAM_CMD_LAST;
9296
9297                 if (i == (len - 4))
9298                         nvram_cmd |= NVRAM_CMD_LAST;
9299
9300                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9301                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
9302                     (tp->nvram_jedecnum == JEDEC_ST) &&
9303                     (nvram_cmd & NVRAM_CMD_FIRST)) {
9304
9305                         if ((ret = tg3_nvram_exec_cmd(tp,
9306                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9307                                 NVRAM_CMD_DONE)))
9308
9309                                 break;
9310                 }
9311                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9312                         /* We always do complete word writes to eeprom. */
9313                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9314                 }
9315
9316                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9317                         break;
9318         }
9319         return ret;
9320 }
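/* The buffered path issues no separate erase; it only has to frame the
 * stream of dwords correctly: NVRAM_CMD_FIRST at the start of each page
 * (and of the transfer), NVRAM_CMD_LAST at the end of each page and at the
 * final dword, plus an extra write-enable for ST parts and forced
 * FIRST|LAST framing for non-flash devices, as the special cases above
 * show.
 */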
9321
9322 /* offset and length are dword aligned */
9323 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9324 {
9325         int ret;
9326
9327         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9328                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
9329                 return -EINVAL;
9330         }
9331
9332         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9333                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9334                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9335                 udelay(40);
9336         }
9337
9338         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9339                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9340         }
9341         else {
9342                 u32 grc_mode;
9343
9344                 ret = tg3_nvram_lock(tp);
9345                 if (ret)
9346                         return ret;
9347
9348                 tg3_enable_nvram_access(tp);
9349                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9350                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9351                         tw32(NVRAM_WRITE1, 0x406);
9352
9353                 grc_mode = tr32(GRC_MODE);
9354                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9355
9356                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9357                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9358
9359                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9360                                 buf);
9361                 }
9362                 else {
9363                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9364                                 buf);
9365                 }
9366
9367                 grc_mode = tr32(GRC_MODE);
9368                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9369
9370                 tg3_disable_nvram_access(tp);
9371                 tg3_nvram_unlock(tp);
9372         }
9373
9374         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9375                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9376                 udelay(40);
9377         }
9378
9379         return ret;
9380 }
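/* tg3_nvram_write_block() is the entry point the rest of the driver uses
 * (e.g. the ethtool EEPROM write path): it lowers the GPIO based write
 * protect if needed, picks the EEPROM, buffered or unbuffered helper
 * above, and brackets the flash helpers with the NVRAM lock, access enable
 * and GRC_MODE_NVRAM_WR_ENABLE.  A minimal caller sketch, with offset and
 * length dword aligned as the helpers require:
 *
 *	u8 data[4] = { 0x12, 0x34, 0x56, 0x78 };
 *	int err = tg3_nvram_write_block(tp, offset, sizeof(data), data);
 */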
9381
9382 struct subsys_tbl_ent {
9383         u16 subsys_vendor, subsys_devid;
9384         u32 phy_id;
9385 };
9386
9387 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9388         /* Broadcom boards. */
9389         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9390         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9391         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9392         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
9393         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9394         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9395         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
9396         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9397         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9398         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9399         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9400
9401         /* 3com boards. */
9402         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9403         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9404         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
9405         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9406         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9407
9408         /* DELL boards. */
9409         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9410         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9411         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9412         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9413
9414         /* Compaq boards. */
9415         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9416         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9417         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
9418         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9419         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9420
9421         /* IBM boards. */
9422         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9423 };
9424
9425 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9426 {
9427         int i;
9428
9429         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9430                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9431                      tp->pdev->subsystem_vendor) &&
9432                     (subsys_id_to_phy_id[i].subsys_devid ==
9433                      tp->pdev->subsystem_device))
9434                         return &subsys_id_to_phy_id[i];
9435         }
9436         return NULL;
9437 }
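/* Table entries with a phy_id of 0 (e.g. the 3C996SX and CHANGELING
 * boards) are handled in tg3_phy_probe() below, which treats a zero
 * phy_id returned from this table as a SERDES (fiber) device.
 */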
9438
9439 /* Since this function may be called in D3-hot power state during
9440  * tg3_init_one(), only config cycles are allowed.
9441  */
9442 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9443 {
9444         u32 val;
9445
9446         /* Make sure register accesses (indirect or otherwise)
9447          * will function correctly.
9448          */
9449         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9450                                tp->misc_host_ctrl);
9451
9452         tp->phy_id = PHY_ID_INVALID;
9453         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9454
9455         /* Do not even try poking around in here on Sun parts.  */
9456         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
9457                 return;
9458
9459         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9460         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9461                 u32 nic_cfg, led_cfg;
9462                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9463                 int eeprom_phy_serdes = 0;
9464
9465                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9466                 tp->nic_sram_data_cfg = nic_cfg;
9467
9468                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9469                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9470                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9471                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9472                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9473                     (ver > 0) && (ver < 0x100))
9474                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9475
9476                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9477                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9478                         eeprom_phy_serdes = 1;
9479
9480                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9481                 if (nic_phy_id != 0) {
9482                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9483                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9484
9485                         eeprom_phy_id  = (id1 >> 16) << 10;
9486                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9487                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9488                 } else
9489                         eeprom_phy_id = 0;
9490
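                /* The shifts above pack the two SRAM ID words into the
                 * same layout that tg3_phy_probe() builds from
                 * MII_PHYSID1/MII_PHYSID2, so both sources yield a
                 * directly comparable PHY_ID_* value.
                 */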
9491                 tp->phy_id = eeprom_phy_id;
9492                 if (eeprom_phy_serdes) {
9493                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9494                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9495                         else
9496                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9497                 }
9498
9499                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9500                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9501                                     SHASTA_EXT_LED_MODE_MASK);
9502                 else
9503                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9504
9505                 switch (led_cfg) {
9506                 default:
9507                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9508                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9509                         break;
9510
9511                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9512                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9513                         break;
9514
9515                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9516                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9517
9518                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9519                          * read on some older 5700/5701 bootcode.
9520                          */
9521                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9522                             ASIC_REV_5700 ||
9523                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9524                             ASIC_REV_5701)
9525                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9526
9527                         break;
9528
9529                 case SHASTA_EXT_LED_SHARED:
9530                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9531                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9532                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9533                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9534                                                  LED_CTRL_MODE_PHY_2);
9535                         break;
9536
9537                 case SHASTA_EXT_LED_MAC:
9538                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9539                         break;
9540
9541                 case SHASTA_EXT_LED_COMBO:
9542                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9543                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9544                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9545                                                  LED_CTRL_MODE_PHY_2);
9546                         break;
9547
9548         }
9549
9550                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9551                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9552                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9553                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9554
9555                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9556                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9557                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9558                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9559
9560                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9561                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9562                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9563                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9564                 }
9565                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9566                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9567
9568                 if (cfg2 & (1 << 17))
9569                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9570
9571                 /* SerDes signal pre-emphasis in register 0x590 is set
9572                  * by the bootcode if bit 18 is set.  */
9573                 if (cfg2 & (1 << 18))
9574                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9575         }
9576 }
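/* Everything tg3_get_eeprom_hw_cfg() learns comes from the shared memory
 * area that bootcode leaves in NIC SRAM: the PHY ID, LED mode, SERDES
 * flags, ASF enable, EEPROM write protect and WOL capability bits derived
 * above are used throughout the rest of the probe path.
 */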
9577
9578 static int __devinit tg3_phy_probe(struct tg3 *tp)
9579 {
9580         u32 hw_phy_id_1, hw_phy_id_2;
9581         u32 hw_phy_id, hw_phy_id_masked;
9582         int err;
9583
9584         /* Reading the PHY ID register can conflict with ASF
9585          * firmware access to the PHY hardware.
9586          */
9587         err = 0;
9588         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9589                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9590         } else {
9591                 /* Now read the physical PHY_ID from the chip and verify
9592                  * that it is sane.  If it doesn't look good, we fall back
9593                  * to the PHY_ID found in the eeprom area, and failing
9594                  * that, to the hard-coded subsystem-ID table.
9595                  */
9596                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9597                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9598
9599                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9600                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9601                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9602
9603                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9604         }
9605
9606         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9607                 tp->phy_id = hw_phy_id;
9608                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9609                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9610                 else
9611                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9612         } else {
9613                 if (tp->phy_id != PHY_ID_INVALID) {
9614                         /* Do nothing, phy ID already set up in
9615                          * tg3_get_eeprom_hw_cfg().
9616                          */
9617                 } else {
9618                         struct subsys_tbl_ent *p;
9619
9620                         /* No eeprom signature?  Try the hardcoded
9621                          * subsys device table.
9622                          */
9623                         p = lookup_by_subsys(tp);
9624                         if (!p)
9625                                 return -ENODEV;
9626
9627                         tp->phy_id = p->phy_id;
9628                         if (!tp->phy_id ||
9629                             tp->phy_id == PHY_ID_BCM8002)
9630                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9631                 }
9632         }
9633
9634         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9635             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9636                 u32 bmsr, adv_reg, tg3_ctrl;
9637
9638                 tg3_readphy(tp, MII_BMSR, &bmsr);
9639                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9640                     (bmsr & BMSR_LSTATUS))
9641                         goto skip_phy_reset;
9642
9643                 err = tg3_phy_reset(tp);
9644                 if (err)
9645                         return err;
9646
9647                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9648                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9649                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9650                 tg3_ctrl = 0;
9651                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9652                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9653                                     MII_TG3_CTRL_ADV_1000_FULL);
9654                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9655                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9656                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9657                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9658                 }
9659
9660                 if (!tg3_copper_is_advertising_all(tp)) {
9661                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9662
9663                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9664                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9665
9666                         tg3_writephy(tp, MII_BMCR,
9667                                      BMCR_ANENABLE | BMCR_ANRESTART);
9668                 }
9669                 tg3_phy_set_wirespeed(tp);
9670
9671                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9672                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9673                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9674         }
9675
9676 skip_phy_reset:
9677         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9678                 err = tg3_init_5401phy_dsp(tp);
9679                 if (err)
9680                         return err;
9681         }
9682
9683         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9684                 err = tg3_init_5401phy_dsp(tp);
9685         }
9686
9687         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9688                 tp->link_config.advertising =
9689                         (ADVERTISED_1000baseT_Half |
9690                          ADVERTISED_1000baseT_Full |
9691                          ADVERTISED_Autoneg |
9692                          ADVERTISED_FIBRE);
9693         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9694                 tp->link_config.advertising &=
9695                         ~(ADVERTISED_1000baseT_Half |
9696                           ADVERTISED_1000baseT_Full);
9697
9698         return err;
9699 }
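/* In summary, tg3_phy_probe() trusts sources in this order: the ID read
 * from the MII registers (when ASF is not active and the value is a known
 * one), then the ID recorded by tg3_get_eeprom_hw_cfg(), and finally the
 * subsystem-ID table.  Copper PHYs that do not already have link get a
 * reset and have autonegotiation restarted with the full 10/100/1000
 * advertisement, unless the device is 10/100 only.
 */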
9700
9701 static void __devinit tg3_read_partno(struct tg3 *tp)
9702 {
9703         unsigned char vpd_data[256];
9704         int i;
9705         u32 magic;
9706
9707         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9708                 /* Sun decided not to put the necessary bits in the
9709                  * NVRAM of their onboard tg3 parts :(
9710                  */
9711                 strcpy(tp->board_part_number, "Sun 570X");
9712                 return;
9713         }
9714
9715         if (tg3_nvram_read_swab(tp, 0x0, &magic))
9716                 return;
9717
9718         if (magic == TG3_EEPROM_MAGIC) {
9719                 for (i = 0; i < 256; i += 4) {
9720                         u32 tmp;
9721
9722                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9723                                 goto out_not_found;
9724
9725                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9726                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9727                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9728                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9729                 }
9730         } else {
9731                 int vpd_cap;
9732
9733                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
9734                 for (i = 0; i < 256; i += 4) {
9735                         u32 tmp, j = 0;
9736                         u16 tmp16;
9737
9738                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
9739                                               i);
9740                         while (j++ < 100) {
9741                                 pci_read_config_word(tp->pdev, vpd_cap +
9742                                                      PCI_VPD_ADDR, &tmp16);
9743                                 if (tmp16 & 0x8000)
9744                                         break;
9745                                 msleep(1);
9746                         }
9747                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9748                                               &tmp);
9749                         tmp = cpu_to_le32(tmp);
9750                         memcpy(&vpd_data[i], &tmp, 4);
9751                 }
9752         }
9753
9754         /* Now parse and find the part number. */
9755         for (i = 0; i < 256; ) {
9756                 unsigned char val = vpd_data[i];
9757                 int block_end;
9758
9759                 if (val == 0x82 || val == 0x91) {
9760                         i = (i + 3 +
9761                              (vpd_data[i + 1] +
9762                               (vpd_data[i + 2] << 8)));
9763                         continue;
9764                 }
9765
9766                 if (val != 0x90)
9767                         goto out_not_found;
9768
9769                 block_end = (i + 3 +
9770                              (vpd_data[i + 1] +
9771                               (vpd_data[i + 2] << 8)));
9772                 i += 3;
9773                 while (i < block_end) {
9774                         if (vpd_data[i + 0] == 'P' &&
9775                             vpd_data[i + 1] == 'N') {
9776                                 int partno_len = vpd_data[i + 2];
9777
9778                                 if (partno_len > 24)
9779                                         goto out_not_found;
9780
9781                                 memcpy(tp->board_part_number,
9782                                        &vpd_data[i + 3],
9783                                        partno_len);
9784
9785                                 /* Success. */
9786                                 return;
9787                         }
                        /* Advance to the next VPD keyword: two name bytes,
                         * one length byte, then the data itself.
                         */
                        i += 3 + vpd_data[i + 2];
9788                 }
9789
9790                 /* Part number not found. */
9791                 goto out_not_found;
9792         }
9793
9794 out_not_found:
9795         strcpy(tp->board_part_number, "none");
9796 }
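/* The parser above walks standard PCI VPD: the 0x82 (identifier string)
 * and 0x91 (read-write) resources are skipped using their 16-bit little
 * endian length, and the 0x90 (read-only) resource is scanned for the two
 * byte 'PN' keyword, whose data (rejected above 24 bytes) is copied into
 * tp->board_part_number.
 */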
9797
9798 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
9799 {
9800         u32 val, offset, start;
9801
9802         if (tg3_nvram_read_swab(tp, 0, &val))
9803                 return;
9804
9805         if (val != TG3_EEPROM_MAGIC)
9806                 return;
9807
9808         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
9809             tg3_nvram_read_swab(tp, 0x4, &start))
9810                 return;
9811
9812         offset = tg3_nvram_logical_addr(tp, offset);
9813         if (tg3_nvram_read_swab(tp, offset, &val))
9814                 return;
9815
9816         if ((val & 0xfc000000) == 0x0c000000) {
9817                 u32 ver_offset, addr;
9818                 int i;
9819
9820                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
9821                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
9822                         return;
9823
9824                 if (val != 0)
9825                         return;
9826
9827                 addr = offset + ver_offset - start;
9828                 for (i = 0; i < 16; i += 4) {
9829                         if (tg3_nvram_read(tp, addr + i, &val))
9830                                 return;
9831
9832                         val = cpu_to_le32(val);
9833                         memcpy(tp->fw_ver + i, &val, 4);
9834                 }
9835         }
9836 }
9837
9838 #ifdef CONFIG_SPARC64
9839 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9840 {
9841         struct pci_dev *pdev = tp->pdev;
9842         struct pcidev_cookie *pcp = pdev->sysdata;
9843
9844         if (pcp != NULL) {
9845                 int node = pcp->prom_node;
9846                 u32 venid;
9847                 int err;
9848
9849                 err = prom_getproperty(node, "subsystem-vendor-id",
9850                                        (char *) &venid, sizeof(venid));
9851                 if (err == 0 || err == -1)
9852                         return 0;
9853                 if (venid == PCI_VENDOR_ID_SUN)
9854                         return 1;
9855
9856                 /* TG3 chips onboard the SunBlade-2500 don't have the
9857                  * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
9858                  * are distinguishable from non-Sun variants by being
9859                  * named "network" by the firmware.  Non-Sun cards will
9860                  * show up as being named "ethernet".
9861                  */
9862                 if (!strcmp(pcp->prom_name, "network"))
9863                         return 1;
9864         }
9865         return 0;
9866 }
9867 #endif
9868
9869 static int __devinit tg3_get_invariants(struct tg3 *tp)
9870 {
9871         static struct pci_device_id write_reorder_chipsets[] = {
9872                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9873                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9874                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9875                              PCI_DEVICE_ID_VIA_8385_0) },
9876                 { },
9877         };
9878         u32 misc_ctrl_reg;
9879         u32 cacheline_sz_reg;
9880         u32 pci_state_reg, grc_misc_cfg;
9881         u32 val;
9882         u16 pci_cmd;
9883         int err;
9884
9885 #ifdef CONFIG_SPARC64
9886         if (tg3_is_sun_570X(tp))
9887                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9888 #endif
9889
9890         /* Force memory write invalidate off.  If we leave it on,
9891          * then on 5700_BX chips we have to enable a workaround.
9892          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9893          * to match the cacheline size.  The Broadcom driver has this
9894          * workaround but turns MWI off all the time, so it never uses
9895          * it.  This seems to suggest that the workaround is insufficient.
9896          */
9897         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9898         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9899         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9900
9901         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9902          * has the register indirect write enable bit set before
9903          * we try to access any of the MMIO registers.  It is also
9904          * critical that the PCI-X hw workaround situation is decided
9905          * before that point.
9906          */
9907         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9908                               &misc_ctrl_reg);
9909
9910         tp->pci_chip_rev_id = (misc_ctrl_reg >>
9911                                MISC_HOST_CTRL_CHIPREV_SHIFT);
9912
9913         /* Wrong chip ID in 5752 A0. This code can be removed later
9914          * as A0 is not in production.
9915          */
9916         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
9917                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
9918
9919         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9920          * we need to disable memory and use config. cycles
9921          * only to access all registers. The 5702/03 chips
9922          * can mistakenly decode the special cycles from the
9923          * ICH chipsets as memory write cycles, causing corruption
9924          * of register and memory space. Only certain ICH bridges
9925          * will drive special cycles with non-zero data during the
9926          * address phase which can fall within the 5703's address
9927          * range. This is not an ICH bug as the PCI spec allows
9928          * non-zero address during special cycles. However, only
9929          * these ICH bridges are known to drive non-zero addresses
9930          * during special cycles.
9931          *
9932          * Since special cycles do not cross PCI bridges, we only
9933          * enable this workaround if the 5703 is on the secondary
9934          * bus of these ICH bridges.
9935          */
9936         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
9937             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
9938                 static struct tg3_dev_id {
9939                         u32     vendor;
9940                         u32     device;
9941                         u32     rev;
9942                 } ich_chipsets[] = {
9943                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
9944                           PCI_ANY_ID },
9945                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
9946                           PCI_ANY_ID },
9947                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
9948                           0xa },
9949                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
9950                           PCI_ANY_ID },
9951                         { },
9952                 };
9953                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
9954                 struct pci_dev *bridge = NULL;
9955
9956                 while (pci_id->vendor != 0) {
9957                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
9958                                                 bridge);
9959                         if (!bridge) {
9960                                 pci_id++;
9961                                 continue;
9962                         }
9963                         if (pci_id->rev != PCI_ANY_ID) {
9964                                 u8 rev;
9965
9966                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
9967                                                      &rev);
9968                                 if (rev > pci_id->rev)
9969                                         continue;
9970                         }
9971                         if (bridge->subordinate &&
9972                             (bridge->subordinate->number ==
9973                              tp->pdev->bus->number)) {
9974
9975                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
9976                                 pci_dev_put(bridge);
9977                                 break;
9978                         }
9979                 }
9980         }
9981
9982         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
9983          * DMA addresses > 40-bit.  This bridge may have additional
9984          * 57xx devices behind it, in some 4-port NIC designs for example.
9985          * Any tg3 device found behind the bridge will also need the 40-bit
9986          * DMA workaround.
9987          */
9988         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
9989             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9990                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
9991                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
9992                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
9993         }
9994         else {
9995                 struct pci_dev *bridge = NULL;
9996
9997                 do {
9998                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
9999                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10000                                                 bridge);
10001                         if (bridge && bridge->subordinate &&
10002                             (bridge->subordinate->number <=
10003                              tp->pdev->bus->number) &&
10004                             (bridge->subordinate->subordinate >=
10005                              tp->pdev->bus->number)) {
10006                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10007                                 pci_dev_put(bridge);
10008                                 break;
10009                         }
10010                 } while (bridge);
10011         }
10012
10013         /* Initialize misc host control in PCI block. */
10014         tp->misc_host_ctrl |= (misc_ctrl_reg &
10015                                MISC_HOST_CTRL_CHIPREV);
10016         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10017                                tp->misc_host_ctrl);
10018
10019         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10020                               &cacheline_sz_reg);
10021
10022         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10023         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10024         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10025         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10026
10027         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10028             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10029             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10030             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10031                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10032
10033         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10034             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10035                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10036
10037         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10038                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10039                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10040                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10041                 } else
10042                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1;
10043         }
10044
10045         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10046             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10047             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10048             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10049                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10050
10051         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10052                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10053
10054         /* If we have an AMD 762 or VIA K8T800 chipset, write
10055          * reordering to the mailbox registers done by the host
10056          * controller can cause major troubles.  We read back from
10057          * every mailbox register write to force the writes to be
10058          * posted to the chip in order.
10059          */
10060         if (pci_dev_present(write_reorder_chipsets) &&
10061             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10062                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10063
10064         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10065             tp->pci_lat_timer < 64) {
10066                 tp->pci_lat_timer = 64;
10067
10068                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10069                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10070                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10071                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10072
10073                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10074                                        cacheline_sz_reg);
10075         }
10076
10077         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10078                               &pci_state_reg);
10079
10080         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10081                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10082
10083                 /* If this is a 5700 BX chipset, and we are in PCI-X
10084                  * mode, enable register write workaround.
10085                  *
10086                  * The workaround is to use indirect register accesses
10087                  * for all chip writes not to mailbox registers.
10088                  */
10089                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10090                         u32 pm_reg;
10091                         u16 pci_cmd;
10092
10093                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10094
10095                         /* The chip can have its power management PCI config
10096                          * space registers clobbered due to this bug.
10097                          * So explicitly force the chip into D0 here.
10098                          */
10099                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10100                                               &pm_reg);
10101                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10102                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10103                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10104                                                pm_reg);
10105
10106                         /* Also, force SERR#/PERR# in PCI command. */
10107                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10108                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10109                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10110                 }
10111         }
10112
10113         /* 5700 BX chips need to have their TX producer index mailboxes
10114          * written twice to workaround a bug.
10115          */
10116         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10117                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10118
10119         /* Back to back register writes can cause problems on this chip,
10120          * the workaround is to read back all reg writes except those to
10121          * mailbox regs.  See tg3_write_indirect_reg32().
10122          *
10123          * PCI Express 5750_A0 rev chips need this workaround too.
10124          */
10125         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10126             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10127              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10128                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10129
10130         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10131                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10132         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10133                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10134
10135         /* Chip-specific fixup from Broadcom driver */
10136         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10137             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10138                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10139                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10140         }
10141
10142         /* Default fast path register access methods */
10143         tp->read32 = tg3_read32;
10144         tp->write32 = tg3_write32;
10145         tp->read32_mbox = tg3_read32;
10146         tp->write32_mbox = tg3_write32;
10147         tp->write32_tx_mbox = tg3_write32;
10148         tp->write32_rx_mbox = tg3_write32;
10149
10150         /* Various workaround register access methods */
10151         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10152                 tp->write32 = tg3_write_indirect_reg32;
10153         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10154                 tp->write32 = tg3_write_flush_reg32;
10155
10156         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10157             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10158                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10159                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10160                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10161         }
10162
10163         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10164                 tp->read32 = tg3_read_indirect_reg32;
10165                 tp->write32 = tg3_write_indirect_reg32;
10166                 tp->read32_mbox = tg3_read_indirect_mbox;
10167                 tp->write32_mbox = tg3_write_indirect_mbox;
10168                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10169                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10170
10171                 iounmap(tp->regs);
10172                 tp->regs = NULL;
10173
10174                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10175                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10176                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10177         }
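        /* Register and mailbox accesses in the rest of the driver go
         * through the tp->read32/write32 function pointers chosen above, so
         * the per-chip workarounds (indirect config space accesses, posted
         * write flushes, doubled TX mailbox writes) stay out of the paths
         * that do not need them.
         */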
10178
10179         /* Get eeprom hw config before calling tg3_set_power_state().
10180          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10181          * determined before calling tg3_set_power_state() so that
10182          * we know whether or not to switch out of Vaux power.
10183          * When the flag is set, it means that GPIO1 is used for eeprom
10184          * write protect and also implies that it is a LOM where GPIOs
10185          * are not used to switch power.
10186          */
10187         tg3_get_eeprom_hw_cfg(tp);
10188
10189         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10190          * GPIO1 driven high will bring 5700's external PHY out of reset.
10191          * It is also used as eeprom write protect on LOMs.
10192          */
10193         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10194         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10195             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10196                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10197                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10198         /* Unused GPIO3 must be driven as output on 5752 because there
10199          * are no pull-up resistors on unused GPIO pins.
10200          */
10201         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10202                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10203
10204         /* Force the chip into D0. */
10205         err = tg3_set_power_state(tp, PCI_D0);
10206         if (err) {
10207                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10208                        pci_name(tp->pdev));
10209                 return err;
10210         }
10211
10212         /* 5700 B0 chips do not support checksumming correctly due
10213          * to hardware bugs.
10214          */
10215         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10216                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10217
10218         /* Pseudo-header checksum is done by hardware logic and not
10219          * the offload processors, so make the chip do the pseudo-
10220          * header checksums on receive.  For transmit it is more
10221          * convenient to do the pseudo-header checksum in software
10222          * as Linux does that on transmit for us in all cases.
10223          */
10224         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
10225         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
10226
10227         /* Derive initial jumbo mode from MTU assigned in
10228          * ether_setup() via the alloc_etherdev() call
10229          */
10230         if (tp->dev->mtu > ETH_DATA_LEN &&
10231             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10232                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10233
10234         /* Determine WakeOnLan speed to use. */
10235         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10236             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10237             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10238             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10239                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10240         } else {
10241                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10242         }
10243
10244         /* A few boards don't want Ethernet@WireSpeed phy feature */
10245         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10246             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10247              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10248              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10249             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10250                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10251
10252         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10253             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10254                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10255         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10256                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10257
10258         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
10259             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787))
10260                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10261
10262         tp->coalesce_mode = 0;
10263         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10264             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10265                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10266
10267         /* Initialize MAC MI mode, polling disabled. */
10268         tw32_f(MAC_MI_MODE, tp->mi_mode);
10269         udelay(80);
10270
10271         /* Initialize data/descriptor byte/word swapping. */
10272         val = tr32(GRC_MODE);
10273         val &= GRC_MODE_HOST_STACKUP;
10274         tw32(GRC_MODE, val | tp->grc_mode);
10275
10276         tg3_switch_clocks(tp);
10277
10278         /* Clear this out for sanity. */
10279         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10280
10281         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10282                               &pci_state_reg);
10283         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10284             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10285                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10286
10287                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10288                     chiprevid == CHIPREV_ID_5701_B0 ||
10289                     chiprevid == CHIPREV_ID_5701_B2 ||
10290                     chiprevid == CHIPREV_ID_5701_B5) {
10291                         void __iomem *sram_base;
10292
10293                         /* Write some dummy words into the SRAM status block
10294                          * area, see if it reads back correctly.  If the return
10295                          * value is bad, force enable the PCIX workaround.
10296                          */
10297                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10298
10299                         writel(0x00000000, sram_base);
10300                         writel(0x00000000, sram_base + 4);
10301                         writel(0xffffffff, sram_base + 4);
10302                         if (readl(sram_base) != 0x00000000)
10303                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10304                 }
10305         }
10306
10307         udelay(50);
10308         tg3_nvram_init(tp);
10309
10310         grc_misc_cfg = tr32(GRC_MISC_CFG);
10311         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10312
10313         /* Broadcom's driver says that CIOBE multisplit has a bug */
10314 #if 0
10315         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10316             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10317                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10318                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10319         }
10320 #endif
10321         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10322             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10323              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10324                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10325
10326         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10327             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10328                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10329         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10330                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10331                                       HOSTCC_MODE_CLRTICK_TXBD);
10332
10333                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10334                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10335                                        tp->misc_host_ctrl);
10336         }
10337
10338         /* these are limited to 10/100 only */
10339         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10340              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10341             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10342              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10343              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10344               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10345               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10346             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10347              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10348               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10349                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10350
10351         err = tg3_phy_probe(tp);
10352         if (err) {
10353                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10354                        pci_name(tp->pdev), err);
10355                 /* ... but do not return immediately ... */
10356         }
10357
10358         tg3_read_partno(tp);
10359         tg3_read_fw_ver(tp);
10360
10361         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10362                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10363         } else {
10364                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10365                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10366                 else
10367                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10368         }
10369
10370         /* 5700 {AX,BX} chips have a broken status block link
10371          * change bit implementation, so we must use the
10372          * status register in those cases.
10373          */
10374         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10375                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10376         else
10377                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10378
10379         /* The led_ctrl is set during tg3_phy_probe, here we might
10380          * have to force the link status polling mechanism based
10381          * upon subsystem IDs.
10382          */
10383         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10384             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10385                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10386                                   TG3_FLAG_USE_LINKCHG_REG);
10387         }
10388
10389         /* For all SERDES we poll the MAC status register. */
10390         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10391                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10392         else
10393                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10394
10395         /* All chips before 5787 can get confused if TX buffers
10396          * straddle the 4GB address boundary in some cases.
10397          */
10398         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10399                 tp->dev->hard_start_xmit = tg3_start_xmit;
10400         else
10401                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10402
10403         tp->rx_offset = 2;
10404         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10405             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10406                 tp->rx_offset = 0;
10407
10408         /* By default, disable wake-on-lan.  User can change this
10409          * using ETHTOOL_SWOL.
10410          */
10411         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10412
10413         return err;
10414 }
10415
10416 #ifdef CONFIG_SPARC64
10417 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10418 {
10419         struct net_device *dev = tp->dev;
10420         struct pci_dev *pdev = tp->pdev;
10421         struct pcidev_cookie *pcp = pdev->sysdata;
10422
10423         if (pcp != NULL) {
10424                 int node = pcp->prom_node;
10425
10426                 if (prom_getproplen(node, "local-mac-address") == 6) {
10427                         prom_getproperty(node, "local-mac-address",
10428                                          dev->dev_addr, 6);
10429                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10430                         return 0;
10431                 }
10432         }
10433         return -ENODEV;
10434 }
10435
10436 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10437 {
10438         struct net_device *dev = tp->dev;
10439
10440         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10441         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10442         return 0;
10443 }
10444 #endif
10445
10446 static int __devinit tg3_get_device_address(struct tg3 *tp)
10447 {
10448         struct net_device *dev = tp->dev;
10449         u32 hi, lo, mac_offset;
10450
10451 #ifdef CONFIG_SPARC64
10452         if (!tg3_get_macaddr_sparc(tp))
10453                 return 0;
10454 #endif
10455
10456         mac_offset = 0x7c;
10457         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10458              !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) ||
10459             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
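                /* Dual-MAC devices keep the second port's address at NVRAM
                 * offset 0xcc rather than the usual 0x7c.
                 */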
10460                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10461                         mac_offset = 0xcc;
10462                 if (tg3_nvram_lock(tp))
10463                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10464                 else
10465                         tg3_nvram_unlock(tp);
10466         }
10467
10468         /* First try to get it from MAC address mailbox. */
10469         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
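        /* The upper 16 bits read as 0x484b (ASCII "HK", presumably a
         * validity signature left by the bootcode) when the mailbox holds
         * a usable MAC address.
         */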
10470         if ((hi >> 16) == 0x484b) {
10471                 dev->dev_addr[0] = (hi >>  8) & 0xff;
10472                 dev->dev_addr[1] = (hi >>  0) & 0xff;
10473
10474                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10475                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10476                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10477                 dev->dev_addr[4] = (lo >>  8) & 0xff;
10478                 dev->dev_addr[5] = (lo >>  0) & 0xff;
10479         }
10480         /* Next, try NVRAM. */
10481         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
10482                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10483                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10484                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
10485                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
10486                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
10487                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
10488                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
10489                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
10490         }
10491         /* Finally just fetch it out of the MAC control regs. */
10492         else {
10493                 hi = tr32(MAC_ADDR_0_HIGH);
10494                 lo = tr32(MAC_ADDR_0_LOW);
10495
10496                 dev->dev_addr[5] = lo & 0xff;
10497                 dev->dev_addr[4] = (lo >> 8) & 0xff;
10498                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10499                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10500                 dev->dev_addr[1] = hi & 0xff;
10501                 dev->dev_addr[0] = (hi >> 8) & 0xff;
10502         }
10503
10504         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10505 #ifdef CONFIG_SPARC64
10506                 if (!tg3_get_default_macaddr_sparc(tp))
10507                         return 0;
10508 #endif
10509                 return -EINVAL;
10510         }
10511         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10512         return 0;
10513 }
10514
10515 #define BOUNDARY_SINGLE_CACHELINE       1
10516 #define BOUNDARY_MULTI_CACHELINE        2
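/* DMA burst boundary goals for tg3_calc_dma_bndry() below:
 * BOUNDARY_SINGLE_CACHELINE programs the read/write boundary so bursts
 * stop at every cache line, BOUNDARY_MULTI_CACHELINE lets bursts span a
 * few cache lines, and a goal of 0 leaves the chip's defaults untouched.
 */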
10517
10518 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10519 {
10520         int cacheline_size;
10521         u8 byte;
10522         int goal;
10523
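        /* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, so multiply by
         * four to get bytes; a value of 0 means the register was never
         * programmed and 1024 bytes is assumed instead.
         */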
10524         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10525         if (byte == 0)
10526                 cacheline_size = 1024;
10527         else
10528                 cacheline_size = (int) byte * 4;
10529
10530         /* On 5703 and later chips, the boundary bits have no
10531          * effect.
10532          */
10533         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10534             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10535             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10536                 goto out;
10537
10538 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10539         goal = BOUNDARY_MULTI_CACHELINE;
10540 #else
10541 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10542         goal = BOUNDARY_SINGLE_CACHELINE;
10543 #else
10544         goal = 0;
10545 #endif
10546 #endif
10547
10548         if (!goal)
10549                 goto out;
10550
10551         /* PCI controllers on most RISC systems tend to disconnect
10552          * when a device tries to burst across a cache-line boundary.
10553          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10554          *
10555          * Unfortunately, for PCI-E there are only limited
10556          * write-side controls for this, and thus for reads
10557          * we will still get the disconnects.  We'll also waste
10558          * these PCI cycles for both read and write for chips
10559          * other than 5700 and 5701 which do not implement the
10560          * boundary bits.
10561          */
10562         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10563             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10564                 switch (cacheline_size) {
10565                 case 16:
10566                 case 32:
10567                 case 64:
10568                 case 128:
10569                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10570                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10571                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10572                         } else {
10573                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10574                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10575                         }
10576                         break;
10577
10578                 case 256:
10579                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10580                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10581                         break;
10582
10583                 default:
10584                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10585                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10586                         break;
10587                 }
10588         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10589                 switch (cacheline_size) {
10590                 case 16:
10591                 case 32:
10592                 case 64:
10593                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10594                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10595                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10596                                 break;
10597                         }
10598                         /* fallthrough */
10599                 case 128:
10600                 default:
10601                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10602                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10603                         break;
10604                 }
10605         } else {
10606                 switch (cacheline_size) {
10607                 case 16:
10608                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10609                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10610                                         DMA_RWCTRL_WRITE_BNDRY_16);
10611                                 break;
10612                         }
10613                         /* fallthrough */
10614                 case 32:
10615                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10616                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10617                                         DMA_RWCTRL_WRITE_BNDRY_32);
10618                                 break;
10619                         }
10620                         /* fallthrough */
10621                 case 64:
10622                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10623                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10624                                         DMA_RWCTRL_WRITE_BNDRY_64);
10625                                 break;
10626                         }
10627                         /* fallthrough */
10628                 case 128:
10629                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10630                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10631                                         DMA_RWCTRL_WRITE_BNDRY_128);
10632                                 break;
10633                         }
10634                         /* fallthrough */
10635                 case 256:
10636                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10637                                 DMA_RWCTRL_WRITE_BNDRY_256);
10638                         break;
10639                 case 512:
10640                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10641                                 DMA_RWCTRL_WRITE_BNDRY_512);
10642                         break;
10643                 case 1024:
10644                 default:
10645                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10646                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10647                         break;
10648                 }
10649         }
10650
10651 out:
10652         return val;
10653 }
10654
10655 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10656 {
10657         struct tg3_internal_buffer_desc test_desc;
10658         u32 sram_dma_descs;
10659         int i, ret;
10660
10661         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10662
10663         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10664         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10665         tw32(RDMAC_STATUS, 0);
10666         tw32(WDMAC_STATUS, 0);
10667
10668         tw32(BUFMGR_MODE, 0);
10669         tw32(FTQ_RESET, 0);
10670
10671         test_desc.addr_hi = ((u64) buf_dma) >> 32;
10672         test_desc.addr_lo = buf_dma & 0xffffffff;
10673         test_desc.nic_mbuf = 0x00002100;
10674         test_desc.len = size;
10675
10676         /*
10677          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
10678          * the *second* time the tg3 driver was loaded after an
10679          * initial scan.
10680          *
10681          * Broadcom tells me:
10682          *   ...the DMA engine is connected to the GRC block and a DMA
10683          *   reset may affect the GRC block in some unpredictable way...
10684          *   The behavior of resets to individual blocks has not been tested.
10685          *
10686          * Broadcom noted the GRC reset will also reset all sub-components.
10687          */
10688         if (to_device) {
10689                 test_desc.cqid_sqid = (13 << 8) | 2;
10690
10691                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10692                 udelay(40);
10693         } else {
10694                 test_desc.cqid_sqid = (16 << 8) | 7;
10695
10696                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10697                 udelay(40);
10698         }
10699         test_desc.flags = 0x00000005;
10700
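        /* Copy the test descriptor into NIC SRAM one 32-bit word at a time
         * through the PCI memory window registers.
         */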
10701         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10702                 u32 val;
10703
10704                 val = *(((u32 *)&test_desc) + i);
10705                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10706                                        sram_dma_descs + (i * sizeof(u32)));
10707                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10708         }
10709         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10710
10711         if (to_device) {
10712                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10713         } else {
10714                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10715         }
10716
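        /* Poll the completion FIFO for up to ~4ms (40 iterations of 100us)
         * waiting for our descriptor index to appear.
         */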
10717         ret = -ENODEV;
10718         for (i = 0; i < 40; i++) {
10719                 u32 val;
10720
10721                 if (to_device)
10722                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10723                 else
10724                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10725                 if ((val & 0xffff) == sram_dma_descs) {
10726                         ret = 0;
10727                         break;
10728                 }
10729
10730                 udelay(100);
10731         }
10732
10733         return ret;
10734 }
10735
10736 #define TEST_BUFFER_SIZE        0x2000
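/* 8KB of coherent memory is enough for the DMA round-trip test below. */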
10737
10738 static int __devinit tg3_test_dma(struct tg3 *tp)
10739 {
10740         dma_addr_t buf_dma;
10741         u32 *buf, saved_dma_rwctrl;
10742         int ret;
10743
10744         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10745         if (!buf) {
10746                 ret = -ENOMEM;
10747                 goto out_nofree;
10748         }
10749
10750         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10751                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10752
10753         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
10754
10755         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10756                 /* DMA read watermark not used on PCIE */
10757                 tp->dma_rwctrl |= 0x00180000;
10758         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10759                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10760                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10761                         tp->dma_rwctrl |= 0x003f0000;
10762                 else
10763                         tp->dma_rwctrl |= 0x003f000f;
10764         } else {
10765                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10766                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10767                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10768
10769                         /* If the 5704 is behind the EPB bridge, we can
10770                          * do the less restrictive ONE_DMA workaround for
10771                          * better performance.
10772                          */
10773                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
10774                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10775                                 tp->dma_rwctrl |= 0x8000;
10776                         else if (ccval == 0x6 || ccval == 0x7)
10777                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10778
10779                         /* Set bit 23 to enable PCIX hw bug fix */
10780                         tp->dma_rwctrl |= 0x009f0000;
10781                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10782                         /* 5780 always in PCIX mode */
10783                         tp->dma_rwctrl |= 0x00144000;
10784                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10785                         /* 5714 always in PCIX mode */
10786                         tp->dma_rwctrl |= 0x00148000;
10787                 } else {
10788                         tp->dma_rwctrl |= 0x001b000f;
10789                 }
10790         }
10791
10792         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10793             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10794                 tp->dma_rwctrl &= 0xfffffff0;
10795
10796         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10797             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10798                 /* Remove this if it causes problems for some boards. */
10799                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10800
10801                 /* On 5700/5701 chips, we need to set this bit.
10802                  * Otherwise the chip will issue cacheline transactions
10803                  * to streamable DMA memory with not all the byte
10804                  * enables turned on.  This is an error on several
10805                  * RISC PCI controllers, in particular sparc64.
10806                  *
10807                  * On 5703/5704 chips, this bit has been reassigned
10808                  * a different meaning.  In particular, it is used
10809                  * on those chips to enable a PCI-X workaround.
10810                  */
10811                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10812         }
10813
10814         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10815
10816 #if 0
10817         /* Unneeded, already done by tg3_get_invariants.  */
10818         tg3_switch_clocks(tp);
10819 #endif
10820
10821         ret = 0;
10822         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10823             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10824                 goto out;
10825
10826         /* It is best to perform DMA test with maximum write burst size
10827          * to expose the 5700/5701 write DMA bug.
10828          */
10829         saved_dma_rwctrl = tp->dma_rwctrl;
10830         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10831         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10832
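        /* Fill the buffer with a known pattern, DMA it to the chip and back
         * again, and verify the contents.  On corruption, fall back to a
         * 16-byte write boundary and retry; if that also fails, give up.
         */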
10833         while (1) {
10834                 u32 *p = buf, i;
10835
10836                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10837                         p[i] = i;
10838
10839                 /* Send the buffer to the chip. */
10840                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10841                 if (ret) {
10842                         printk(KERN_ERR "tg3_test_dma() write of test buffer failed, err %d\n", ret);
10843                         break;
10844                 }
10845
10846 #if 0
10847                 /* validate data reached card RAM correctly. */
10848                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10849                         u32 val;
10850                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10851                         if (le32_to_cpu(val) != p[i]) {
10852                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
10853                                 /* ret = -ENODEV here? */
10854                         }
10855                         p[i] = 0;
10856                 }
10857 #endif
10858                 /* Now read it back. */
10859                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10860                 if (ret) {
10861                         printk(KERN_ERR "tg3_test_dma() read of test buffer failed, err %d\n", ret);
10862
10863                         break;
10864                 }
10865
10866                 /* Verify it. */
10867                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10868                         if (p[i] == i)
10869                                 continue;
10870
10871                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10872                             DMA_RWCTRL_WRITE_BNDRY_16) {
10873                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10874                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10875                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10876                                 break;
10877                         } else {
10878                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10879                                 ret = -ENODEV;
10880                                 goto out;
10881                         }
10882                 }
10883
10884                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10885                         /* Success. */
10886                         ret = 0;
10887                         break;
10888                 }
10889         }
10890         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10891             DMA_RWCTRL_WRITE_BNDRY_16) {
10892                 static struct pci_device_id dma_wait_state_chipsets[] = {
10893                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10894                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10895                         { },
10896                 };
10897
10898                 /* DMA test passed without adjusting the DMA boundary;
10899                  * now look for chipsets that are known to expose the
10900                  * DMA bug without failing the test.
10901                  */
10902                 if (pci_dev_present(dma_wait_state_chipsets)) {
10903                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10904                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10905                 }
10906                 else
10907                         /* Safe to use the calculated DMA boundary. */
10908                         tp->dma_rwctrl = saved_dma_rwctrl;
10909
10910                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10911         }
10912
10913 out:
10914         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
10915 out_nofree:
10916         return ret;
10917 }
10918
10919 static void __devinit tg3_init_link_config(struct tg3 *tp)
10920 {
10921         tp->link_config.advertising =
10922                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10923                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10924                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
10925                  ADVERTISED_Autoneg | ADVERTISED_MII);
10926         tp->link_config.speed = SPEED_INVALID;
10927         tp->link_config.duplex = DUPLEX_INVALID;
10928         tp->link_config.autoneg = AUTONEG_ENABLE;
10929         tp->link_config.active_speed = SPEED_INVALID;
10930         tp->link_config.active_duplex = DUPLEX_INVALID;
10931         tp->link_config.phy_is_low_power = 0;
10932         tp->link_config.orig_speed = SPEED_INVALID;
10933         tp->link_config.orig_duplex = DUPLEX_INVALID;
10934         tp->link_config.orig_autoneg = AUTONEG_INVALID;
10935 }
10936
10937 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
10938 {
10939         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10940                 tp->bufmgr_config.mbuf_read_dma_low_water =
10941                         DEFAULT_MB_RDMA_LOW_WATER_5705;
10942                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10943                         DEFAULT_MB_MACRX_LOW_WATER_5705;
10944                 tp->bufmgr_config.mbuf_high_water =
10945                         DEFAULT_MB_HIGH_WATER_5705;
10946
10947                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10948                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
10949                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10950                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
10951                 tp->bufmgr_config.mbuf_high_water_jumbo =
10952                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
10953         } else {
10954                 tp->bufmgr_config.mbuf_read_dma_low_water =
10955                         DEFAULT_MB_RDMA_LOW_WATER;
10956                 tp->bufmgr_config.mbuf_mac_rx_low_water =
10957                         DEFAULT_MB_MACRX_LOW_WATER;
10958                 tp->bufmgr_config.mbuf_high_water =
10959                         DEFAULT_MB_HIGH_WATER;
10960
10961                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
10962                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
10963                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
10964                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
10965                 tp->bufmgr_config.mbuf_high_water_jumbo =
10966                         DEFAULT_MB_HIGH_WATER_JUMBO;
10967         }
10968
10969         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
10970         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
10971 }
10972
10973 static char * __devinit tg3_phy_string(struct tg3 *tp)
10974 {
10975         switch (tp->phy_id & PHY_ID_MASK) {
10976         case PHY_ID_BCM5400:    return "5400";
10977         case PHY_ID_BCM5401:    return "5401";
10978         case PHY_ID_BCM5411:    return "5411";
10979         case PHY_ID_BCM5701:    return "5701";
10980         case PHY_ID_BCM5703:    return "5703";
10981         case PHY_ID_BCM5704:    return "5704";
10982         case PHY_ID_BCM5705:    return "5705";
10983         case PHY_ID_BCM5750:    return "5750";
10984         case PHY_ID_BCM5752:    return "5752";
10985         case PHY_ID_BCM5714:    return "5714";
10986         case PHY_ID_BCM5780:    return "5780";
10987         case PHY_ID_BCM5787:    return "5787";
10988         case PHY_ID_BCM8002:    return "8002/serdes";
10989         case 0:                 return "serdes";
10990         default:                return "unknown";
10991         }
10992 }
10993
10994 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
10995 {
10996         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10997                 strcpy(str, "PCI Express");
10998                 return str;
10999         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11000                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11001
11002                 strcpy(str, "PCIX:");
11003
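                /* The low 5 bits of CLOCK_CTRL encode the PCI-X bus speed;
                 * the 5704 CIOBE board is always reported as 133MHz.
                 */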
11004                 if ((clock_ctrl == 7) ||
11005                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11006                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11007                         strcat(str, "133MHz");
11008                 else if (clock_ctrl == 0)
11009                         strcat(str, "33MHz");
11010                 else if (clock_ctrl == 2)
11011                         strcat(str, "50MHz");
11012                 else if (clock_ctrl == 4)
11013                         strcat(str, "66MHz");
11014                 else if (clock_ctrl == 6)
11015                         strcat(str, "100MHz");
11016         } else {
11017                 strcpy(str, "PCI:");
11018                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11019                         strcat(str, "66MHz");
11020                 else
11021                         strcat(str, "33MHz");
11022         }
11023         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11024                 strcat(str, ":32-bit");
11025         else
11026                 strcat(str, ":64-bit");
11027         return str;
11028 }
11029
11030 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11031 {
11032         struct pci_dev *peer;
11033         unsigned int func, devnr = tp->pdev->devfn & ~7;
11034
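        /* devfn & ~7 masks off the function number; scan all eight
         * functions of this slot looking for the sibling port.
         */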
11035         for (func = 0; func < 8; func++) {
11036                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11037                 if (peer && peer != tp->pdev)
11038                         break;
11039                 pci_dev_put(peer);
11040         }
11041         /* 5704 can be configured in single-port mode, set peer to
11042          * tp->pdev in that case.
11043          */
11044         if (!peer) {
11045                 peer = tp->pdev;
11046                 return peer;
11047         }
11048
11049         /*
11050          * We don't need to keep the refcount elevated; there's no way
11051          * to remove one half of this device without removing the other
11052          */
11053         pci_dev_put(peer);
11054
11055         return peer;
11056 }
11057
11058 static void __devinit tg3_init_coal(struct tg3 *tp)
11059 {
11060         struct ethtool_coalesce *ec = &tp->coal;
11061
11062         memset(ec, 0, sizeof(*ec));
11063         ec->cmd = ETHTOOL_GCOALESCE;
11064         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11065         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11066         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11067         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11068         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11069         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11070         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11071         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11072         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11073
11074         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11075                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11076                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11077                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11078                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11079                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11080         }
11081
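        /* 5705 and newer parts apparently do not use the per-interrupt
         * coalescing ticks or a separate statistics block interval, so
         * advertise zero for those.
         */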
11082         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11083                 ec->rx_coalesce_usecs_irq = 0;
11084                 ec->tx_coalesce_usecs_irq = 0;
11085                 ec->stats_block_coalesce_usecs = 0;
11086         }
11087 }
11088
11089 static int __devinit tg3_init_one(struct pci_dev *pdev,
11090                                   const struct pci_device_id *ent)
11091 {
11092         static int tg3_version_printed = 0;
11093         unsigned long tg3reg_base, tg3reg_len;
11094         struct net_device *dev;
11095         struct tg3 *tp;
11096         int i, err, pm_cap;
11097         char str[40];
11098         u64 dma_mask, persist_dma_mask;
11099
11100         if (tg3_version_printed++ == 0)
11101                 printk(KERN_INFO "%s", version);
11102
11103         err = pci_enable_device(pdev);
11104         if (err) {
11105                 printk(KERN_ERR PFX "Cannot enable PCI device, "
11106                        "aborting.\n");
11107                 return err;
11108         }
11109
11110         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11111                 printk(KERN_ERR PFX "Cannot find proper PCI device "
11112                        "base address, aborting.\n");
11113                 err = -ENODEV;
11114                 goto err_out_disable_pdev;
11115         }
11116
11117         err = pci_request_regions(pdev, DRV_MODULE_NAME);
11118         if (err) {
11119                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11120                        "aborting.\n");
11121                 goto err_out_disable_pdev;
11122         }
11123
11124         pci_set_master(pdev);
11125
11126         /* Find power-management capability. */
11127         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11128         if (pm_cap == 0) {
11129                 printk(KERN_ERR PFX "Cannot find Power Management capability, "
11130                        "aborting.\n");
11131                 err = -EIO;
11132                 goto err_out_free_res;
11133         }
11134
11135         tg3reg_base = pci_resource_start(pdev, 0);
11136         tg3reg_len = pci_resource_len(pdev, 0);
11137
11138         dev = alloc_etherdev(sizeof(*tp));
11139         if (!dev) {
11140                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11141                 err = -ENOMEM;
11142                 goto err_out_free_res;
11143         }
11144
11145         SET_MODULE_OWNER(dev);
11146         SET_NETDEV_DEV(dev, &pdev->dev);
11147
11148         dev->features |= NETIF_F_LLTX;
11149 #if TG3_VLAN_TAG_USED
11150         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11151         dev->vlan_rx_register = tg3_vlan_rx_register;
11152         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
11153 #endif
11154
11155         tp = netdev_priv(dev);
11156         tp->pdev = pdev;
11157         tp->dev = dev;
11158         tp->pm_cap = pm_cap;
11159         tp->mac_mode = TG3_DEF_MAC_MODE;
11160         tp->rx_mode = TG3_DEF_RX_MODE;
11161         tp->tx_mode = TG3_DEF_TX_MODE;
11162         tp->mi_mode = MAC_MI_MODE_BASE;
11163         if (tg3_debug > 0)
11164                 tp->msg_enable = tg3_debug;
11165         else
11166                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11167
11168         /* The word/byte swap controls here control register access byte
11169          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
11170          * setting below.
11171          */
11172         tp->misc_host_ctrl =
11173                 MISC_HOST_CTRL_MASK_PCI_INT |
11174                 MISC_HOST_CTRL_WORD_SWAP |
11175                 MISC_HOST_CTRL_INDIR_ACCESS |
11176                 MISC_HOST_CTRL_PCISTATE_RW;
11177
11178         /* The NONFRM (non-frame) byte/word swap controls take effect
11179          * on descriptor entries, anything which isn't packet data.
11180          *
11181          * The StrongARM chips on the board (one for tx, one for rx)
11182          * are running in big-endian mode.
11183          */
11184         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11185                         GRC_MODE_WSWAP_NONFRM_DATA);
11186 #ifdef __BIG_ENDIAN
11187         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11188 #endif
11189         spin_lock_init(&tp->lock);
11190         spin_lock_init(&tp->tx_lock);
11191         spin_lock_init(&tp->indirect_lock);
11192         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
11193
11194         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11195         if (!tp->regs) {
11196                 printk(KERN_ERR PFX "Cannot map device registers, "
11197                        "aborting.\n");
11198                 err = -ENOMEM;
11199                 goto err_out_free_dev;
11200         }
11201
11202         tg3_init_link_config(tp);
11203
11204         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11205         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11206         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11207
11208         dev->open = tg3_open;
11209         dev->stop = tg3_close;
11210         dev->get_stats = tg3_get_stats;
11211         dev->set_multicast_list = tg3_set_rx_mode;
11212         dev->set_mac_address = tg3_set_mac_addr;
11213         dev->do_ioctl = tg3_ioctl;
11214         dev->tx_timeout = tg3_tx_timeout;
11215         dev->poll = tg3_poll;
11216         dev->ethtool_ops = &tg3_ethtool_ops;
11217         dev->weight = 64;
11218         dev->watchdog_timeo = TG3_TX_TIMEOUT;
11219         dev->change_mtu = tg3_change_mtu;
11220         dev->irq = pdev->irq;
11221 #ifdef CONFIG_NET_POLL_CONTROLLER
11222         dev->poll_controller = tg3_poll_controller;
11223 #endif
11224
11225         err = tg3_get_invariants(tp);
11226         if (err) {
11227                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11228                        "aborting.\n");
11229                 goto err_out_iounmap;
11230         }
11231
11232         /* The EPB bridge inside 5714, 5715, and 5780 and any
11233          * device behind the EPB cannot support DMA addresses > 40-bit.
11234          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11235          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11236          * do DMA address check in tg3_start_xmit().
11237          */
11238         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11239                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11240         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11241                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11242 #ifdef CONFIG_HIGHMEM
11243                 dma_mask = DMA_64BIT_MASK;
11244 #endif
11245         } else
11246                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11247
11248         /* Configure DMA attributes. */
11249         if (dma_mask > DMA_32BIT_MASK) {
11250                 err = pci_set_dma_mask(pdev, dma_mask);
11251                 if (!err) {
11252                         dev->features |= NETIF_F_HIGHDMA;
11253                         err = pci_set_consistent_dma_mask(pdev,
11254                                                           persist_dma_mask);
11255                         if (err < 0) {
11256                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11257                                        "DMA for consistent allocations\n");
11258                                 goto err_out_iounmap;
11259                         }
11260                 }
11261         }
11262         if (err || dma_mask == DMA_32BIT_MASK) {
11263                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11264                 if (err) {
11265                         printk(KERN_ERR PFX "No usable DMA configuration, "
11266                                "aborting.\n");
11267                         goto err_out_iounmap;
11268                 }
11269         }
11270
11271         tg3_init_bufmgr_config(tp);
11272
11273 #if TG3_TSO_SUPPORT != 0
11274         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11275                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11276         }
11277         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11278             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11279             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11280             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11281                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11282         } else {
11283                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11284         }
11285
11286         /* TSO is on by default on chips that support hardware TSO.
11287          * Firmware TSO on older chips gives lower performance, so it
11288          * is off by default, but can be enabled using ethtool.
11289          */
11290         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
11291                 dev->features |= NETIF_F_TSO;
11292
11293 #endif
11294
11295         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11296             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11297             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11298                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11299                 tp->rx_pending = 63;
11300         }
11301
11302         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11303             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11304                 tp->pdev_peer = tg3_find_peer(tp);
11305
11306         err = tg3_get_device_address(tp);
11307         if (err) {
11308                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11309                        "aborting.\n");
11310                 goto err_out_iounmap;
11311         }
11312
11313         /*
11314          * Reset the chip in case the UNDI or EFI driver did not shut it
11315          * down cleanly.  The DMA self test will enable WDMAC and we'll
11316          * see (spurious) pending DMA on the PCI bus at that point.
11317          */
11318         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11319             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11320                 pci_save_state(tp->pdev);
11321                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11322                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11323         }
11324
11325         err = tg3_test_dma(tp);
11326         if (err) {
11327                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11328                 goto err_out_iounmap;
11329         }
11330
11331         /* Tigon3 can do ipv4 only... and some chips have buggy
11332          * checksumming.
11333          */
11334         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11335                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11336                         dev->features |= NETIF_F_HW_CSUM;
11337                 else
11338                         dev->features |= NETIF_F_IP_CSUM;
11339                 dev->features |= NETIF_F_SG;
11340                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11341         } else
11342                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11343
11344         /* flow control autonegotiation is default behavior */
11345         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11346
11347         tg3_init_coal(tp);
11348
11349         /* Now that we have fully setup the chip, save away a snapshot
11350          * of the PCI config space.  We need to restore this after
11351          * GRC_MISC_CFG core clock resets and some resume events.
11352          */
11353         pci_save_state(tp->pdev);
11354
11355         err = register_netdev(dev);
11356         if (err) {
11357                 printk(KERN_ERR PFX "Cannot register net device, "
11358                        "aborting.\n");
11359                 goto err_out_iounmap;
11360         }
11361
11362         pci_set_drvdata(pdev, dev);
11363
11364         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11365                dev->name,
11366                tp->board_part_number,
11367                tp->pci_chip_rev_id,
11368                tg3_phy_string(tp),
11369                tg3_bus_string(tp, str),
11370                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11371
11372         for (i = 0; i < 6; i++)
11373                 printk("%2.2x%c", dev->dev_addr[i],
11374                        i == 5 ? '\n' : ':');
11375
11376         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11377                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11378                "TSOcap[%d] \n",
11379                dev->name,
11380                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11381                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11382                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11383                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11384                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11385                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11386                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11387         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11388                dev->name, tp->dma_rwctrl,
11389                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11390                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
11391
11392         netif_carrier_off(tp->dev);
11393
11394         return 0;
11395
11396 err_out_iounmap:
11397         if (tp->regs) {
11398                 iounmap(tp->regs);
11399                 tp->regs = NULL;
11400         }
11401
11402 err_out_free_dev:
11403         free_netdev(dev);
11404
11405 err_out_free_res:
11406         pci_release_regions(pdev);
11407
11408 err_out_disable_pdev:
11409         pci_disable_device(pdev);
11410         pci_set_drvdata(pdev, NULL);
11411         return err;
11412 }
11413
11414 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11415 {
11416         struct net_device *dev = pci_get_drvdata(pdev);
11417
11418         if (dev) {
11419                 struct tg3 *tp = netdev_priv(dev);
11420
11421                 flush_scheduled_work();
11422                 unregister_netdev(dev);
11423                 if (tp->regs) {
11424                         iounmap(tp->regs);
11425                         tp->regs = NULL;
11426                 }
11427                 free_netdev(dev);
11428                 pci_release_regions(pdev);
11429                 pci_disable_device(pdev);
11430                 pci_set_drvdata(pdev, NULL);
11431         }
11432 }
11433
11434 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11435 {
11436         struct net_device *dev = pci_get_drvdata(pdev);
11437         struct tg3 *tp = netdev_priv(dev);
11438         int err;
11439
11440         if (!netif_running(dev))
11441                 return 0;
11442
11443         flush_scheduled_work();
11444         tg3_netif_stop(tp);
11445
11446         del_timer_sync(&tp->timer);
11447
11448         tg3_full_lock(tp, 1);
11449         tg3_disable_ints(tp);
11450         tg3_full_unlock(tp);
11451
11452         netif_device_detach(dev);
11453
11454         tg3_full_lock(tp, 0);
11455         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11456         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
11457         tg3_full_unlock(tp);
11458
11459         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
11460         if (err) {
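                /* Could not enter the requested low-power state; bring the
                 * device back up so it remains usable in D0.
                 */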
11461                 tg3_full_lock(tp, 0);
11462
11463                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11464                 tg3_init_hw(tp);
11465
11466                 tp->timer.expires = jiffies + tp->timer_offset;
11467                 add_timer(&tp->timer);
11468
11469                 netif_device_attach(dev);
11470                 tg3_netif_start(tp);
11471
11472                 tg3_full_unlock(tp);
11473         }
11474
11475         return err;
11476 }
11477
11478 static int tg3_resume(struct pci_dev *pdev)
11479 {
11480         struct net_device *dev = pci_get_drvdata(pdev);
11481         struct tg3 *tp = netdev_priv(dev);
11482         int err;
11483
11484         if (!netif_running(dev))
11485                 return 0;
11486
11487         pci_restore_state(tp->pdev);
11488
11489         err = tg3_set_power_state(tp, PCI_D0);
11490         if (err)
11491                 return err;
11492
11493         netif_device_attach(dev);
11494
11495         tg3_full_lock(tp, 0);
11496
11497         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11498         tg3_init_hw(tp);
11499
11500         tp->timer.expires = jiffies + tp->timer_offset;
11501         add_timer(&tp->timer);
11502
11503         tg3_netif_start(tp);
11504
11505         tg3_full_unlock(tp);
11506
11507         return 0;
11508 }
11509
11510 static struct pci_driver tg3_driver = {
11511         .name           = DRV_MODULE_NAME,
11512         .id_table       = tg3_pci_tbl,
11513         .probe          = tg3_init_one,
11514         .remove         = __devexit_p(tg3_remove_one),
11515         .suspend        = tg3_suspend,
11516         .resume         = tg3_resume
11517 };
11518
11519 static int __init tg3_init(void)
11520 {
11521         return pci_module_init(&tg3_driver);
11522 }
11523
11524 static void __exit tg3_cleanup(void)
11525 {
11526         pci_unregister_driver(&tg3_driver);
11527 }
11528
11529 module_init(tg3_init);
11530 module_exit(tg3_cleanup);