/* netdev: convert non-obvious instances to use ARRAY_SIZE()
 * Source: pandora-kernel.git / drivers/net/tg3.c
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43 #include <net/ip.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC
51 #include <asm/idprom.h>
52 #include <asm/prom.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #define TG3_TSO_SUPPORT 1
62
63 #include "tg3.h"
64
#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.83"
#define DRV_MODULE_RELDATE      "October 10, 2007"

/* Default MAC/RX/TX mode register values programmed at init time. */
#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

/* Byte sizes of the host-visible descriptor rings. */
#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
/* TX ring size is a power of two, so advance with a mask instead of '%'. */
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

/* RX buffer sizes: max frame + rx_offset alignment + slop.
 * NOTE: both implicitly reference a local 'tp'.
 */
#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

/* Number of entries in ethtool_test_keys[] below. */
#define TG3_NUM_TEST            6
134
/* Identification banner printed once at probe time. */
static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Bitmap of NETIF_MSG_* message classes to enable. */
static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
147 static struct pci_device_id tg3_pci_tbl[] = {
148         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
149         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
150         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
151         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
152         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
153         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
154         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
201         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
202         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
203         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
204         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
205         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
206         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
207         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
208         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
209         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
210         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
211         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
212         {}
213 };
214
215 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
216
/* Names reported for ETHTOOL_GSTATS.  One entry per u64 in
 * struct tg3_ethtool_stats (TG3_NUM_STATS is derived from that struct's
 * size); presumably the entry order mirrors the struct's field order —
 * verify against tg3.h before reordering anything here.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};
297
/* Names reported for ethtool self-test results; must stay in sync with
 * TG3_NUM_TEST and with the order the self-test code fills results in.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};
308
309 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
310 {
311         writel(val, tp->regs + off);
312 }
313
314 static u32 tg3_read32(struct tg3 *tp, u32 off)
315 {
316         return (readl(tp->regs + off));
317 }
318
319 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
320 {
321         writel(val, tp->aperegs + off);
322 }
323
324 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
325 {
326         return (readl(tp->aperegs + off));
327 }
328
/* Write a chip register through the PCI config-space indirection window
 * (TG3PCI_REG_BASE_ADDR selects the register, TG3PCI_REG_DATA carries
 * the value).  indirect_lock serializes the two-step address/data
 * sequence against concurrent indirect accesses.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
338
/* Register write followed by a read-back of the same register, which
 * flushes the posted write out to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}
344
/* Read a chip register through the PCI config-space indirection window;
 * counterpart of tg3_write_indirect_reg32().
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
356
/* Write a mailbox register when direct MMIO cannot be used.
 *
 * Two mailboxes have dedicated config-space aliases and bypass the
 * generic indirection window entirely; everything else goes through
 * TG3PCI_REG_BASE_ADDR/TG3PCI_REG_DATA at offset off + 0x5600
 * (presumably the mailboxes' base within the indirect register space —
 * confirm against the chip register map).
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        /* Receive-return consumer index mailbox has a config-space alias. */
        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        /* Standard RX producer index mailbox has a config-space alias too. */
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
386
/* Read a mailbox register via the config-space indirection window;
 * uses the same off + 0x5600 mapping as tg3_write_indirect_mbox().
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
398
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                /* Read-back flushes the posted write to the chip. */
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
423
/* Mailbox write, followed by a flushing read-back unless the chip either
 * needs the write-reorder workaround or the ICH workaround (where the
 * read would be unsafe or pointless).
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}
431
/* Write a TX mailbox.  Chips with the TXD mailbox hardware bug need the
 * value written twice; chips prone to reordering mailbox writes need a
 * read-back to force ordering.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}
441
442 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
443 {
444         return (readl(tp->regs + off + GRCMBOX_BASE));
445 }
446
447 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
448 {
449         writel(val, tp->regs + off + GRCMBOX_BASE);
450 }
451
/* Register/mailbox access shorthands.  All of these dispatch through the
 * per-chip method pointers and implicitly require a local 'struct tg3 *tp'
 * to be in scope at the call site.  The _f variants flush the write.
 */
#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)               tp->read32(tp, reg)
462
/* Write a word into NIC on-board SRAM through the memory window.
 * On 5906 the statistics-block region of SRAM is skipped entirely
 * (writes there are silently dropped).  Depending on the chip, the
 * window is driven either via config space or via MMIO.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
487
/* Read a word of NIC on-board SRAM through the memory window; mirror of
 * tg3_write_mem().  The inaccessible 5906 stats-block region reads back
 * as zero.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
514
515 static void tg3_ape_lock_init(struct tg3 *tp)
516 {
517         int i;
518
519         /* Make sure the driver hasn't any stale locks. */
520         for (i = 0; i < 8; i++)
521                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
522                                 APE_LOCK_GRANT_DRIVER);
523 }
524
/* Acquire an APE hardware lock shared with the management firmware.
 * Only TG3_APE_LOCK_MEM is supported.  Returns 0 on success, -EINVAL
 * for an unknown lock, -EBUSY if the lock was not granted within ~1ms
 * (100 polls x 10us).  A no-op (success) when APE is not enabled.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status;

        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                return 0;

        switch (locknum) {
                case TG3_APE_LOCK_MEM:
                        break;
                default:
                        return -EINVAL;
        }

        /* Lock registers are laid out 4 bytes apart per lock number. */
        off = 4 * locknum;

        tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
                if (status == APE_LOCK_GRANT_DRIVER)
                        break;
                udelay(10);
        }

        if (status != APE_LOCK_GRANT_DRIVER) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
                                APE_LOCK_GRANT_DRIVER);

                ret = -EBUSY;
        }

        return ret;
}
563
564 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
565 {
566         int off;
567
568         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
569                 return;
570
571         switch (locknum) {
572                 case TG3_APE_LOCK_MEM:
573                         break;
574                 default:
575                         return;
576         }
577
578         off = 4 * locknum;
579         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
580 }
581
/* Mask chip interrupts: set the PCI INT mask bit in misc host control,
 * then write 1 to the interrupt mailbox (the same value the ISR uses to
 * tell the chip the host is handling an interrupt).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
588
/* If the status block shows a pending update (non-tagged mode), force an
 * interrupt via GRC local control so it gets processed; otherwise kick
 * the coalescing engine to deliver any pending work now.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
598
/* Re-enable chip interrupts: clear the PCI INT mask and acknowledge up
 * to the last seen status tag via the interrupt mailbox.  The wmb()
 * orders the irq_sync clear before the mailbox writes.  1-shot MSI
 * chips get the mailbox write twice — presumably a chip quirk; confirm
 * against Broadcom errata before changing.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               (tp->last_tag << 24));
        tg3_cond_int(tp);
}
613
614 static inline unsigned int tg3_has_work(struct tg3 *tp)
615 {
616         struct tg3_hw_status *sblk = tp->hw_status;
617         unsigned int work_exists = 0;
618
619         /* check for phy events */
620         if (!(tp->tg3_flags &
621               (TG3_FLAG_USE_LINKCHG_REG |
622                TG3_FLAG_POLL_SERDES))) {
623                 if (sblk->status & SD_STATUS_LINK_CHG)
624                         work_exists = 1;
625         }
626         /* check for RX/TX work to do */
627         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
628             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
629                 work_exists = 1;
630
631         return work_exists;
632 }
633
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
        /* Acknowledge work up to last_tag; the chip may re-interrupt for
         * anything newer.
         */
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
654
/* Quiesce the network-stack side of the device: refresh trans_start so
 * the TX watchdog does not fire while the queue is disabled, stop NAPI
 * polling, and disable the TX queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        napi_disable(&tp->napi);
        netif_tx_disable(tp->dev);
}
661
/* Resume the network-stack side after tg3_netif_stop(): wake the TX
 * queue, restart NAPI, mark the status block updated and re-enable chip
 * interrupts so any pending events are processed.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        napi_enable(&tp->napi);
        /* Force SD_STATUS_UPDATED so tg3_enable_ints()'s conditional
         * interrupt path fires and pending work is picked up.
         */
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}
673
/* Reprogram the core clock selection in TG3PCI_CLOCK_CTRL, stepping
 * through intermediate ALTCLK settings where the hardware requires it.
 * No-op on CPMU-equipped and 5780-class chips, which manage clocks
 * differently.  Each write waits 40us (see _tw32_flush) because clock
 * frequency changes make immediate read-back unsafe.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
                return;

        orig_clock_ctrl = clock_ctrl;
        /* Preserve only CLKRUN bits and the low divider field. */
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                /* Drop out of 44MHz core clock in two steps via ALTCLK. */
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
705
/* Max 10us polls of MI_COM_BUSY per MII transaction (~50ms worst case). */
#define PHY_BUSY_LOOPS  5000

/* Read PHY register 'reg' over the MII management interface.
 *
 * MAC autopolling is temporarily turned off (and restored on exit) so
 * the manual MI_COM transaction does not race the MAC's own polling.
 * Returns 0 with *val filled in, or -EBUSY if the interface never went
 * idle; *val is zeroed on entry either way.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        /* Build the MI frame: PHY address, register number, read command. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        /* Re-read after a short settle to latch the data. */
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                /* Restore autopolling. */
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
756
/* Write 'val' to PHY register 'reg' over the MII management interface.
 *
 * On 5906 parts, writes to MII_TG3_CTRL and MII_TG3_AUX_CTRL are
 * silently skipped (reported as success).  Autopolling is suspended
 * around the transaction, as in tg3_readphy().  Returns 0 on success
 * or -EBUSY on timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        /* Build the MI frame: PHY address, register, data, write command. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                /* Restore autopolling. */
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
805
/* Enable or disable automatic MDI/MDI-X crossover on the PHY.
 *
 * Only applies to 5705+ copper PHYs (SERDES and pre-5705 parts return
 * early).  5906 parts use the EPHY shadow-register mechanism; all other
 * parts go through the AUX_CTRL misc shadow register.  Errors from the
 * PHY accessors are silently ignored (best effort).
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
        u32 phy;

        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
            (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
                return;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 ephy;

                /* Open the EPHY shadow window, flip the MDIX bit, close it. */
                if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
                        tg3_writephy(tp, MII_TG3_EPHY_TEST,
                                     ephy | MII_TG3_EPHY_SHADOW_EN);
                        if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
                                if (enable)
                                        phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                else
                                        phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
                        }
                        tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
                }
        } else {
                /* Select the AUX_CTRL misc shadow, read-modify-write the
                 * force-AMDIX bit, then write back with WREN set.
                 */
                phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
                      MII_TG3_AUXCTL_SHDWSEL_MISC;
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
                        if (enable)
                                phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        else
                                phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        phy |= MII_TG3_AUXCTL_MISC_WREN;
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
                }
        }
}
843
844 static void tg3_phy_set_wirespeed(struct tg3 *tp)
845 {
846         u32 val;
847
848         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
849                 return;
850
851         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
852             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
853                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
854                              (val | (1 << 15) | (1 << 4)));
855 }
856
857 static int tg3_bmcr_reset(struct tg3 *tp)
858 {
859         u32 phy_control;
860         int limit, err;
861
862         /* OK, reset it, and poll the BMCR_RESET bit until it
863          * clears or we time out.
864          */
865         phy_control = BMCR_RESET;
866         err = tg3_writephy(tp, MII_BMCR, phy_control);
867         if (err != 0)
868                 return -EBUSY;
869
870         limit = 5000;
871         while (limit--) {
872                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
873                 if (err != 0)
874                         return -EBUSY;
875
876                 if ((phy_control & BMCR_RESET) == 0) {
877                         udelay(40);
878                         break;
879                 }
880                 udelay(10);
881         }
882         if (limit <= 0)
883                 return -EBUSY;
884
885         return 0;
886 }
887
888 static int tg3_wait_macro_done(struct tg3 *tp)
889 {
890         int limit = 100;
891
892         while (limit--) {
893                 u32 tmp32;
894
895                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
896                         if ((tmp32 & 0x1000) == 0)
897                                 break;
898                 }
899         }
900         if (limit <= 0)
901                 return -EBUSY;
902
903         return 0;
904 }
905
906 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
907 {
908         static const u32 test_pat[4][6] = {
909         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
910         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
911         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
912         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
913         };
914         int chan;
915
916         for (chan = 0; chan < 4; chan++) {
917                 int i;
918
919                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
920                              (chan * 0x2000) | 0x0200);
921                 tg3_writephy(tp, 0x16, 0x0002);
922
923                 for (i = 0; i < 6; i++)
924                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
925                                      test_pat[chan][i]);
926
927                 tg3_writephy(tp, 0x16, 0x0202);
928                 if (tg3_wait_macro_done(tp)) {
929                         *resetp = 1;
930                         return -EBUSY;
931                 }
932
933                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
934                              (chan * 0x2000) | 0x0200);
935                 tg3_writephy(tp, 0x16, 0x0082);
936                 if (tg3_wait_macro_done(tp)) {
937                         *resetp = 1;
938                         return -EBUSY;
939                 }
940
941                 tg3_writephy(tp, 0x16, 0x0802);
942                 if (tg3_wait_macro_done(tp)) {
943                         *resetp = 1;
944                         return -EBUSY;
945                 }
946
947                 for (i = 0; i < 6; i += 2) {
948                         u32 low, high;
949
950                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
951                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
952                             tg3_wait_macro_done(tp)) {
953                                 *resetp = 1;
954                                 return -EBUSY;
955                         }
956                         low &= 0x7fff;
957                         high &= 0x000f;
958                         if (low != test_pat[chan][i] ||
959                             high != test_pat[chan][i+1]) {
960                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
961                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
962                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
963
964                                 return -EBUSY;
965                         }
966                 }
967         }
968
969         return 0;
970 }
971
972 static int tg3_phy_reset_chanpat(struct tg3 *tp)
973 {
974         int chan;
975
976         for (chan = 0; chan < 4; chan++) {
977                 int i;
978
979                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
980                              (chan * 0x2000) | 0x0200);
981                 tg3_writephy(tp, 0x16, 0x0002);
982                 for (i = 0; i < 6; i++)
983                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
984                 tg3_writephy(tp, 0x16, 0x0202);
985                 if (tg3_wait_macro_done(tp))
986                         return -EBUSY;
987         }
988
989         return 0;
990 }
991
/* PHY reset workaround for 5703/5704/5705: reset the PHY, put it in
 * forced 1000/full master mode, then write and verify a DSP test
 * pattern, retrying with a fresh reset up to 10 times.  Afterwards
 * clear the pattern and restore the registers that were modified.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		/* A pattern failure sets do_phy_reset so the next pass
		 * starts from a fresh PHY reset; success breaks out.
		 */
		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);
	/* NOTE(review): if every iteration hit "continue", phy9_orig
	 * (and the restored reg32 path) may be used uninitialized
	 * below — appears unreachable in practice but worth a look.
	 */

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Clear the test pattern and reset the DSP address register. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the master/slave setting saved above. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt (undo the 0x3000 set). */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
1067
1068 static void tg3_link_report(struct tg3 *);
1069
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 *
 * Performs the chip-appropriate reset (5703/4/5 use the DSP test
 * pattern workaround, others a plain BMCR reset), then applies the
 * per-erratum DSP fixups and re-enables auto-MDIX and wirespeed.
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	/* 5906: bring the internal ethernet PHY out of IDDQ (low
	 * power) before touching it.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR is latched-low; read it twice for the current status. */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* The reset drops the link; report it down right away. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* Per-erratum DSP fixups; the register/value pairs below are
	 * vendor-specified magic for the flagged PHY bugs.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* Written twice on purpose in the original sequence. */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1176
/* Drive the GRC local-control GPIOs that manage auxiliary (Vaux)
 * power.  On dual-port chips (5704/5714) the GPIOs are shared with
 * the peer port, so the peer's WOL/ASF state is consulted first.
 * Only meaningful on real NICs (TG3_FLG2_IS_NIC).
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	/* If either port needs WOL or ASF, keep aux power available. */
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* The peer already finished init and owns the
			 * shared GPIOs; don't fight over them.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			/* Three staged writes: GPIO0 is raised after the
			 * other outputs settle, then GPIO2 released.
			 */
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Pulse GPIO1 (presumably to drop aux power —
			 * verify against board documentation).
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1272
1273 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1274 {
1275         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1276                 return 1;
1277         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1278                 if (speed != SPEED_10)
1279                         return 1;
1280         } else if (speed == SPEED_10)
1281                 return 1;
1282
1283         return 0;
1284 }
1285
1286 static int tg3_setup_phy(struct tg3 *, int);
1287
1288 #define RESET_KIND_SHUTDOWN     0
1289 #define RESET_KIND_INIT         1
1290 #define RESET_KIND_SUSPEND      2
1291
1292 static void tg3_write_sig_post_reset(struct tg3 *, int);
1293 static int tg3_halt_cpu(struct tg3 *, u32);
1294 static int tg3_nvram_lock(struct tg3 *);
1295 static void tg3_nvram_unlock(struct tg3 *);
1296
/* Put the PHY into its lowest safe power state for this chip.
 * SERDES and 5906 devices have dedicated paths; several revisions
 * must NOT get the final BMCR_PDOWN because of hardware bugs.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		/* 5704 SERDES: force HW autoneg + soft reset and set
		 * bit 15 of the SERDES config instead of a PHY write.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* 5906: reset the PHY, then drop the internal ethernet
		 * PHY into IDDQ via GRC_MISC_CFG.
		 */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else {
		/* Other copper PHYs: force LEDs off and write the
		 * vendor low-power AUX_CTRL value.
		 */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;
	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
1336
/* Transition the device to PCI power state @state.
 *
 * PCI_D0 simply restores full power (and switches off Vaux on NICs)
 * and returns.  For D1-D3hot the function saves the link config,
 * programs wake-on-LAN if enabled, gates or slows the core clocks per
 * chip family, powers down the PHY when nothing needs it, frobs the
 * aux-power GPIOs, signals the firmware, and finally writes the PCI
 * PM control register.  Returns 0 on success, -EINVAL for an unknown
 * state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is a NIC */
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask PCI interrupts while the device sleeps. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Save the current link settings so resume can restore them. */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	/* Copper links drop to 10/half autoneg for low power. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait (up to ~200 ms) for the firmware mailbox magic
		 * indicating the boot code has finished.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	/* Configure the MAC for wake-on-LAN reception. */
	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = (tp->tg3_flags &
					     TG3_FLAG_WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		/* Magic-packet wake needs PME from D3cold support. */
		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Clock gating, chosen by chip family and whether WOL/ASF
	 * still needs the clocks running.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two staged writes with settle time between them. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Only power the PHY down when neither WOL, ASF, nor APE
	 * needs it alive.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		tg3_power_down_phy(tp);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU under the NVRAM lock; only
			 * unlock if the lock was actually acquired.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}
1561
1562 static void tg3_link_report(struct tg3 *tp)
1563 {
1564         if (!netif_carrier_ok(tp->dev)) {
1565                 if (netif_msg_link(tp))
1566                         printk(KERN_INFO PFX "%s: Link is down.\n",
1567                                tp->dev->name);
1568         } else if (netif_msg_link(tp)) {
1569                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1570                        tp->dev->name,
1571                        (tp->link_config.active_speed == SPEED_1000 ?
1572                         1000 :
1573                         (tp->link_config.active_speed == SPEED_100 ?
1574                          100 : 10)),
1575                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1576                         "full" : "half"));
1577
1578                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1579                        "%s for RX.\n",
1580                        tp->dev->name,
1581                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1582                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1583         }
1584 }
1585
/* Resolve TX/RX pause (flow control) from the local and link-partner
 * advertisement words and program MAC_RX_MODE / MAC_TX_MODE to match.
 *
 * @local_adv:  our MII_ADVERTISE word (or 1000BaseX equivalent)
 * @remote_adv: partner's MII_LPA word (or 1000BaseX equivalent)
 *
 * When pause autonegotiation is disabled, the flags already present in
 * tp->tg3_flags are honoured as-is.  The MAC mode registers are only
 * rewritten when the computed value actually changed.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
	u32 new_tg3_flags = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {

		/* Convert 1000BaseX flow control bits to 1000BaseT
		 * bits before resolving flow control.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			local_adv &= ~(ADVERTISE_PAUSE_CAP |
				       ADVERTISE_PAUSE_ASYM);
			remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			if (local_adv & ADVERTISE_1000XPAUSE)
				local_adv |= ADVERTISE_PAUSE_CAP;
			if (local_adv & ADVERTISE_1000XPSE_ASYM)
				local_adv |= ADVERTISE_PAUSE_ASYM;
			if (remote_adv & LPA_1000XPAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (remote_adv & LPA_1000XPAUSE_ASYM)
				remote_adv |= LPA_PAUSE_ASYM;
		}

		/* Pause resolution from the two advertisement words
		 * (symmetric vs. asymmetric pause combinations).
		 */
		if (local_adv & ADVERTISE_PAUSE_CAP) {
			if (local_adv & ADVERTISE_PAUSE_ASYM) {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						TG3_FLAG_TX_PAUSE);
				else if (remote_adv & LPA_PAUSE_ASYM)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE);
			} else {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						TG3_FLAG_TX_PAUSE);
			}
		} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if ((remote_adv & LPA_PAUSE_CAP) &&
			(remote_adv & LPA_PAUSE_ASYM))
				new_tg3_flags |= TG3_FLAG_TX_PAUSE;
		}

		/* Record the resolved pause state in the device flags. */
		tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
		tp->tg3_flags |= new_tg3_flags;
	} else {
		/* Autoneg of pause disabled: use the pre-set flags. */
		new_tg3_flags = tp->tg3_flags;
	}

	if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	/* Only touch the hardware when the mode actually changed. */
	if (old_rx_mode != tp->rx_mode) {
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode) {
		tw32_f(MAC_TX_MODE, tp->tx_mode);
	}
}
1657
1658 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1659 {
1660         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1661         case MII_TG3_AUX_STAT_10HALF:
1662                 *speed = SPEED_10;
1663                 *duplex = DUPLEX_HALF;
1664                 break;
1665
1666         case MII_TG3_AUX_STAT_10FULL:
1667                 *speed = SPEED_10;
1668                 *duplex = DUPLEX_FULL;
1669                 break;
1670
1671         case MII_TG3_AUX_STAT_100HALF:
1672                 *speed = SPEED_100;
1673                 *duplex = DUPLEX_HALF;
1674                 break;
1675
1676         case MII_TG3_AUX_STAT_100FULL:
1677                 *speed = SPEED_100;
1678                 *duplex = DUPLEX_FULL;
1679                 break;
1680
1681         case MII_TG3_AUX_STAT_1000HALF:
1682                 *speed = SPEED_1000;
1683                 *duplex = DUPLEX_HALF;
1684                 break;
1685
1686         case MII_TG3_AUX_STAT_1000FULL:
1687                 *speed = SPEED_1000;
1688                 *duplex = DUPLEX_FULL;
1689                 break;
1690
1691         default:
1692                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1693                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1694                                  SPEED_10;
1695                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1696                                   DUPLEX_HALF;
1697                         break;
1698                 }
1699                 *speed = SPEED_INVALID;
1700                 *duplex = DUPLEX_INVALID;
1701                 break;
1702         };
1703 }
1704
/* Program a copper PHY's advertisement registers from tp->link_config,
 * then either force the requested speed/duplex (autoneg disabled) or
 * (re)start autonegotiation.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		/* Keep 100Mb advertised when WoL must link at 100. */
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No fixed speed requested: advertise whatever
		 * link_config.advertising allows.
		 */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 revisions negotiate as link master. */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			/* 5701 A0/B0 revisions negotiate as link master. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		/* Forced mode: write the requested speed/duplex into
		 * BMCR directly instead of autonegotiating.
		 */
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Take the link down via loopback first and wait
			 * (up to ~15ms) for it to actually drop before
			 * writing the new BMCR value.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR is read twice: link status latches
				 * low, so the second read is current.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autoneg enabled (or no fixed speed): restart it. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1837
1838 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1839 {
1840         int err;
1841
1842         /* Turn off tap power management. */
1843         /* Set Extended packet length bit */
1844         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1845
1846         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1847         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1848
1849         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1850         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1851
1852         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1853         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1854
1855         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1856         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1857
1858         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1859         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1860
1861         udelay(40);
1862
1863         return err;
1864 }
1865
1866 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1867 {
1868         u32 adv_reg, all_mask = 0;
1869
1870         if (mask & ADVERTISED_10baseT_Half)
1871                 all_mask |= ADVERTISE_10HALF;
1872         if (mask & ADVERTISED_10baseT_Full)
1873                 all_mask |= ADVERTISE_10FULL;
1874         if (mask & ADVERTISED_100baseT_Half)
1875                 all_mask |= ADVERTISE_100HALF;
1876         if (mask & ADVERTISED_100baseT_Full)
1877                 all_mask |= ADVERTISE_100FULL;
1878
1879         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1880                 return 0;
1881
1882         if ((adv_reg & all_mask) != all_mask)
1883                 return 0;
1884         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1885                 u32 tg3_ctrl;
1886
1887                 all_mask = 0;
1888                 if (mask & ADVERTISED_1000baseT_Half)
1889                         all_mask |= ADVERTISE_1000HALF;
1890                 if (mask & ADVERTISED_1000baseT_Full)
1891                         all_mask |= ADVERTISE_1000FULL;
1892
1893                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1894                         return 0;
1895
1896                 if ((tg3_ctrl & all_mask) != all_mask)
1897                         return 0;
1898         }
1899         return 1;
1900 }
1901
/* Evaluate and (re)establish the link on a copper PHY, then program
 * the MAC (port mode, duplex, polarity, flow control, link events)
 * to match.  @force_reset requests an unconditional PHY reset first.
 * Reports carrier changes to the network stack.  Returns 0, or a
 * negative error from the 5401 DSP setup path.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Clear stale MAC status bits before probing the PHY. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR link status latches low; read twice for current. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link down on a 5401: reload the DSP sequence
			 * and wait up to ~10ms for link to return.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 still without link at gigabit: reset
			 * the PHY and redo the DSP load once more.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	/* Select which PHY interrupts to unmask. */
	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Ensure aux control bit 10 is set; if it was not,
		 * set it and go straight to the relink path.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll up to 100 times for link (double BMSR read: the link
	 * status bit latches low).
	 */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero aux status value, then decode the
		 * negotiated speed/duplex from it.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a plausible BMCR value (non-zero, not the
		 * all-ones-ish 0x7fff pattern).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: link is only good if autoneg is
			 * off and the PHY matched the requested settings.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* Reprogram advertisements / restart autoneg, then see
		 * if the link came straight back.
		 */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program MAC port mode and duplex to match the PHY result. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit in PCI-X / high-speed PCI mode: clear the
	 * sync/config-changed status bits and post a magic value to
	 * the firmware mailbox in NIC SRAM.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Report carrier transitions to the stack and the log. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2178
/* Software state for the fiber (1000BaseX) autonegotiation state
 * machine driven by tg3_fiber_aneg_smachine() below.  The ANEG_STATE_*
 * names mirror the ability/ack/idle-detect arbitration states of
 * hardware autonegotiation.
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;	/* MR_* control and link-partner status bits */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters; cur_time advances once per smachine call. */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last RX config word compared */
	int ability_match_count;	/* consecutive matching configs seen */

	/* Detector outputs updated each tick from MAC RX status. */
	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* raw tx/rx config words (ANEG_CFG_* bits) */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
2236 #define ANEG_OK         0
2237 #define ANEG_DONE       1
2238 #define ANEG_TIMER_ENAB 2
2239 #define ANEG_FAILED     -1
2240
2241 #define ANEG_STATE_SETTLE_TIME  10000
2242
2243 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2244                                    struct tg3_fiber_aneginfo *ap)
2245 {
2246         unsigned long delta;
2247         u32 rx_cfg_reg;
2248         int ret;
2249
2250         if (ap->state == ANEG_STATE_UNKNOWN) {
2251                 ap->rxconfig = 0;
2252                 ap->link_time = 0;
2253                 ap->cur_time = 0;
2254                 ap->ability_match_cfg = 0;
2255                 ap->ability_match_count = 0;
2256                 ap->ability_match = 0;
2257                 ap->idle_match = 0;
2258                 ap->ack_match = 0;
2259         }
2260         ap->cur_time++;
2261
2262         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2263                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2264
2265                 if (rx_cfg_reg != ap->ability_match_cfg) {
2266                         ap->ability_match_cfg = rx_cfg_reg;
2267                         ap->ability_match = 0;
2268                         ap->ability_match_count = 0;
2269                 } else {
2270                         if (++ap->ability_match_count > 1) {
2271                                 ap->ability_match = 1;
2272                                 ap->ability_match_cfg = rx_cfg_reg;
2273                         }
2274                 }
2275                 if (rx_cfg_reg & ANEG_CFG_ACK)
2276                         ap->ack_match = 1;
2277                 else
2278                         ap->ack_match = 0;
2279
2280                 ap->idle_match = 0;
2281         } else {
2282                 ap->idle_match = 1;
2283                 ap->ability_match_cfg = 0;
2284                 ap->ability_match_count = 0;
2285                 ap->ability_match = 0;
2286                 ap->ack_match = 0;
2287
2288                 rx_cfg_reg = 0;
2289         }
2290
2291         ap->rxconfig = rx_cfg_reg;
2292         ret = ANEG_OK;
2293
2294         switch(ap->state) {
2295         case ANEG_STATE_UNKNOWN:
2296                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2297                         ap->state = ANEG_STATE_AN_ENABLE;
2298
2299                 /* fallthru */
2300         case ANEG_STATE_AN_ENABLE:
2301                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2302                 if (ap->flags & MR_AN_ENABLE) {
2303                         ap->link_time = 0;
2304                         ap->cur_time = 0;
2305                         ap->ability_match_cfg = 0;
2306                         ap->ability_match_count = 0;
2307                         ap->ability_match = 0;
2308                         ap->idle_match = 0;
2309                         ap->ack_match = 0;
2310
2311                         ap->state = ANEG_STATE_RESTART_INIT;
2312                 } else {
2313                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2314                 }
2315                 break;
2316
2317         case ANEG_STATE_RESTART_INIT:
2318                 ap->link_time = ap->cur_time;
2319                 ap->flags &= ~(MR_NP_LOADED);
2320                 ap->txconfig = 0;
2321                 tw32(MAC_TX_AUTO_NEG, 0);
2322                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2323                 tw32_f(MAC_MODE, tp->mac_mode);
2324                 udelay(40);
2325
2326                 ret = ANEG_TIMER_ENAB;
2327                 ap->state = ANEG_STATE_RESTART;
2328
2329                 /* fallthru */
2330         case ANEG_STATE_RESTART:
2331                 delta = ap->cur_time - ap->link_time;
2332                 if (delta > ANEG_STATE_SETTLE_TIME) {
2333                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2334                 } else {
2335                         ret = ANEG_TIMER_ENAB;
2336                 }
2337                 break;
2338
2339         case ANEG_STATE_DISABLE_LINK_OK:
2340                 ret = ANEG_DONE;
2341                 break;
2342
2343         case ANEG_STATE_ABILITY_DETECT_INIT:
2344                 ap->flags &= ~(MR_TOGGLE_TX);
2345                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2346                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2347                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2348                 tw32_f(MAC_MODE, tp->mac_mode);
2349                 udelay(40);
2350
2351                 ap->state = ANEG_STATE_ABILITY_DETECT;
2352                 break;
2353
2354         case ANEG_STATE_ABILITY_DETECT:
2355                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2356                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2357                 }
2358                 break;
2359
2360         case ANEG_STATE_ACK_DETECT_INIT:
2361                 ap->txconfig |= ANEG_CFG_ACK;
2362                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2363                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2364                 tw32_f(MAC_MODE, tp->mac_mode);
2365                 udelay(40);
2366
2367                 ap->state = ANEG_STATE_ACK_DETECT;
2368
2369                 /* fallthru */
2370         case ANEG_STATE_ACK_DETECT:
2371                 if (ap->ack_match != 0) {
2372                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2373                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2374                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2375                         } else {
2376                                 ap->state = ANEG_STATE_AN_ENABLE;
2377                         }
2378                 } else if (ap->ability_match != 0 &&
2379                            ap->rxconfig == 0) {
2380                         ap->state = ANEG_STATE_AN_ENABLE;
2381                 }
2382                 break;
2383
2384         case ANEG_STATE_COMPLETE_ACK_INIT:
2385                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2386                         ret = ANEG_FAILED;
2387                         break;
2388                 }
2389                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2390                                MR_LP_ADV_HALF_DUPLEX |
2391                                MR_LP_ADV_SYM_PAUSE |
2392                                MR_LP_ADV_ASYM_PAUSE |
2393                                MR_LP_ADV_REMOTE_FAULT1 |
2394                                MR_LP_ADV_REMOTE_FAULT2 |
2395                                MR_LP_ADV_NEXT_PAGE |
2396                                MR_TOGGLE_RX |
2397                                MR_NP_RX);
2398                 if (ap->rxconfig & ANEG_CFG_FD)
2399                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2400                 if (ap->rxconfig & ANEG_CFG_HD)
2401                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2402                 if (ap->rxconfig & ANEG_CFG_PS1)
2403                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2404                 if (ap->rxconfig & ANEG_CFG_PS2)
2405                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2406                 if (ap->rxconfig & ANEG_CFG_RF1)
2407                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2408                 if (ap->rxconfig & ANEG_CFG_RF2)
2409                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2410                 if (ap->rxconfig & ANEG_CFG_NP)
2411                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2412
2413                 ap->link_time = ap->cur_time;
2414
2415                 ap->flags ^= (MR_TOGGLE_TX);
2416                 if (ap->rxconfig & 0x0008)
2417                         ap->flags |= MR_TOGGLE_RX;
2418                 if (ap->rxconfig & ANEG_CFG_NP)
2419                         ap->flags |= MR_NP_RX;
2420                 ap->flags |= MR_PAGE_RX;
2421
2422                 ap->state = ANEG_STATE_COMPLETE_ACK;
2423                 ret = ANEG_TIMER_ENAB;
2424                 break;
2425
2426         case ANEG_STATE_COMPLETE_ACK:
2427                 if (ap->ability_match != 0 &&
2428                     ap->rxconfig == 0) {
2429                         ap->state = ANEG_STATE_AN_ENABLE;
2430                         break;
2431                 }
2432                 delta = ap->cur_time - ap->link_time;
2433                 if (delta > ANEG_STATE_SETTLE_TIME) {
2434                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2435                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2436                         } else {
2437                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2438                                     !(ap->flags & MR_NP_RX)) {
2439                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2440                                 } else {
2441                                         ret = ANEG_FAILED;
2442                                 }
2443                         }
2444                 }
2445                 break;
2446
2447         case ANEG_STATE_IDLE_DETECT_INIT:
2448                 ap->link_time = ap->cur_time;
2449                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2450                 tw32_f(MAC_MODE, tp->mac_mode);
2451                 udelay(40);
2452
2453                 ap->state = ANEG_STATE_IDLE_DETECT;
2454                 ret = ANEG_TIMER_ENAB;
2455                 break;
2456
2457         case ANEG_STATE_IDLE_DETECT:
2458                 if (ap->ability_match != 0 &&
2459                     ap->rxconfig == 0) {
2460                         ap->state = ANEG_STATE_AN_ENABLE;
2461                         break;
2462                 }
2463                 delta = ap->cur_time - ap->link_time;
2464                 if (delta > ANEG_STATE_SETTLE_TIME) {
2465                         /* XXX another gem from the Broadcom driver :( */
2466                         ap->state = ANEG_STATE_LINK_OK;
2467                 }
2468                 break;
2469
2470         case ANEG_STATE_LINK_OK:
2471                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2472                 ret = ANEG_DONE;
2473                 break;
2474
2475         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2476                 /* ??? unimplemented */
2477                 break;
2478
2479         case ANEG_STATE_NEXT_PAGE_WAIT:
2480                 /* ??? unimplemented */
2481                 break;
2482
2483         default:
2484                 ret = ANEG_FAILED;
2485                 break;
2486         };
2487
2488         return ret;
2489 }
2490
2491 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2492 {
2493         int res = 0;
2494         struct tg3_fiber_aneginfo aninfo;
2495         int status = ANEG_FAILED;
2496         unsigned int tick;
2497         u32 tmp;
2498
2499         tw32_f(MAC_TX_AUTO_NEG, 0);
2500
2501         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2502         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2503         udelay(40);
2504
2505         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2506         udelay(40);
2507
2508         memset(&aninfo, 0, sizeof(aninfo));
2509         aninfo.flags |= MR_AN_ENABLE;
2510         aninfo.state = ANEG_STATE_UNKNOWN;
2511         aninfo.cur_time = 0;
2512         tick = 0;
2513         while (++tick < 195000) {
2514                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2515                 if (status == ANEG_DONE || status == ANEG_FAILED)
2516                         break;
2517
2518                 udelay(1);
2519         }
2520
2521         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2522         tw32_f(MAC_MODE, tp->mac_mode);
2523         udelay(40);
2524
2525         *flags = aninfo.flags;
2526
2527         if (status == ANEG_DONE &&
2528             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2529                              MR_LP_ADV_FULL_DUPLEX)))
2530                 res = 1;
2531
2532         return res;
2533 }
2534
/* tg3_init_bcm8002 - one-time bring-up of the BCM8002 SerDes PHY.
 *
 * Performs a fixed vendor write sequence: set the PLL lock range,
 * soft-reset the PHY, program config/auto-lock registers, pulse POR,
 * and busy-wait for the signal to stabilize.  The register numbers
 * and values are Broadcom magic; do not reorder them.  Finishes by
 * deselecting the channel register so the PHY ID can be read later.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link.
	 * (I.e. skip the whole sequence when init is already complete
	 * and the PCS has no sync - nothing to retrain.)
	 */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete (500 x 10us = 5ms busy wait). */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize (15000 x 10us = 150ms). */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2584
2585 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2586 {
2587         u32 sg_dig_ctrl, sg_dig_status;
2588         u32 serdes_cfg, expected_sg_dig_ctrl;
2589         int workaround, port_a;
2590         int current_link_up;
2591
2592         serdes_cfg = 0;
2593         expected_sg_dig_ctrl = 0;
2594         workaround = 0;
2595         port_a = 1;
2596         current_link_up = 0;
2597
2598         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2599             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2600                 workaround = 1;
2601                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2602                         port_a = 0;
2603
2604                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2605                 /* preserve bits 20-23 for voltage regulator */
2606                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2607         }
2608
2609         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2610
2611         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2612                 if (sg_dig_ctrl & (1 << 31)) {
2613                         if (workaround) {
2614                                 u32 val = serdes_cfg;
2615
2616                                 if (port_a)
2617                                         val |= 0xc010000;
2618                                 else
2619                                         val |= 0x4010000;
2620                                 tw32_f(MAC_SERDES_CFG, val);
2621                         }
2622                         tw32_f(SG_DIG_CTRL, 0x01388400);
2623                 }
2624                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2625                         tg3_setup_flow_control(tp, 0, 0);
2626                         current_link_up = 1;
2627                 }
2628                 goto out;
2629         }
2630
2631         /* Want auto-negotiation.  */
2632         expected_sg_dig_ctrl = 0x81388400;
2633
2634         /* Pause capability */
2635         expected_sg_dig_ctrl |= (1 << 11);
2636
2637         /* Asymettric pause */
2638         expected_sg_dig_ctrl |= (1 << 12);
2639
2640         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2641                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2642                     tp->serdes_counter &&
2643                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
2644                                     MAC_STATUS_RCVD_CFG)) ==
2645                      MAC_STATUS_PCS_SYNCED)) {
2646                         tp->serdes_counter--;
2647                         current_link_up = 1;
2648                         goto out;
2649                 }
2650 restart_autoneg:
2651                 if (workaround)
2652                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2653                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2654                 udelay(5);
2655                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2656
2657                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2658                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2659         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2660                                  MAC_STATUS_SIGNAL_DET)) {
2661                 sg_dig_status = tr32(SG_DIG_STATUS);
2662                 mac_status = tr32(MAC_STATUS);
2663
2664                 if ((sg_dig_status & (1 << 1)) &&
2665                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2666                         u32 local_adv, remote_adv;
2667
2668                         local_adv = ADVERTISE_PAUSE_CAP;
2669                         remote_adv = 0;
2670                         if (sg_dig_status & (1 << 19))
2671                                 remote_adv |= LPA_PAUSE_CAP;
2672                         if (sg_dig_status & (1 << 20))
2673                                 remote_adv |= LPA_PAUSE_ASYM;
2674
2675                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2676                         current_link_up = 1;
2677                         tp->serdes_counter = 0;
2678                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2679                 } else if (!(sg_dig_status & (1 << 1))) {
2680                         if (tp->serdes_counter)
2681                                 tp->serdes_counter--;
2682                         else {
2683                                 if (workaround) {
2684                                         u32 val = serdes_cfg;
2685
2686                                         if (port_a)
2687                                                 val |= 0xc010000;
2688                                         else
2689                                                 val |= 0x4010000;
2690
2691                                         tw32_f(MAC_SERDES_CFG, val);
2692                                 }
2693
2694                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2695                                 udelay(40);
2696
2697                                 /* Link parallel detection - link is up */
2698                                 /* only if we have PCS_SYNC and not */
2699                                 /* receiving config code words */
2700                                 mac_status = tr32(MAC_STATUS);
2701                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2702                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2703                                         tg3_setup_flow_control(tp, 0, 0);
2704                                         current_link_up = 1;
2705                                         tp->tg3_flags2 |=
2706                                                 TG3_FLG2_PARALLEL_DETECT;
2707                                         tp->serdes_counter =
2708                                                 SERDES_PARALLEL_DET_TIMEOUT;
2709                                 } else
2710                                         goto restart_autoneg;
2711                         }
2712                 }
2713         } else {
2714                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2715                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2716         }
2717
2718 out:
2719         return current_link_up;
2720 }
2721
2722 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2723 {
2724         int current_link_up = 0;
2725
2726         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
2727                 goto out;
2728
2729         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2730                 u32 flags;
2731                 int i;
2732
2733                 if (fiber_autoneg(tp, &flags)) {
2734                         u32 local_adv, remote_adv;
2735
2736                         local_adv = ADVERTISE_PAUSE_CAP;
2737                         remote_adv = 0;
2738                         if (flags & MR_LP_ADV_SYM_PAUSE)
2739                                 remote_adv |= LPA_PAUSE_CAP;
2740                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2741                                 remote_adv |= LPA_PAUSE_ASYM;
2742
2743                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2744
2745                         current_link_up = 1;
2746                 }
2747                 for (i = 0; i < 30; i++) {
2748                         udelay(20);
2749                         tw32_f(MAC_STATUS,
2750                                (MAC_STATUS_SYNC_CHANGED |
2751                                 MAC_STATUS_CFG_CHANGED));
2752                         udelay(40);
2753                         if ((tr32(MAC_STATUS) &
2754                              (MAC_STATUS_SYNC_CHANGED |
2755                               MAC_STATUS_CFG_CHANGED)) == 0)
2756                                 break;
2757                 }
2758
2759                 mac_status = tr32(MAC_STATUS);
2760                 if (current_link_up == 0 &&
2761                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2762                     !(mac_status & MAC_STATUS_RCVD_CFG))
2763                         current_link_up = 1;
2764         } else {
2765                 /* Forcing 1000FD link up. */
2766                 current_link_up = 1;
2767
2768                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2769                 udelay(40);
2770
2771                 tw32_f(MAC_MODE, tp->mac_mode);
2772                 udelay(40);
2773         }
2774
2775 out:
2776         return current_link_up;
2777 }
2778
/* tg3_setup_fiber_phy - (re)establish link on a TBI/fiber port.
 * @tp:          device instance
 * @force_reset: unused in this path; presumably kept for signature
 *               parity with the other tg3_setup_*_phy() variants -
 *               TODO confirm
 *
 * Fast path: with software autoneg, carrier already up and init
 * complete, a clean MAC_STATUS (PCS sync + signal detect, no config
 * words or pending changes) lets us just ack the change bits and
 * return.  Otherwise the MAC is put into TBI mode, the BCM8002 PHY is
 * initialized if present, and negotiation is delegated to either
 * tg3_setup_fiber_hw_autoneg() or tg3_setup_fiber_by_hand().  The
 * outcome is written to link_config, the LED control register and the
 * net-device carrier state; a link report is printed on any change.
 *
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot current settings so we can report only real changes. */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			/* Link is healthy: just ack latched events. */
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the latched link-change bit in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack sync/config/link-change events until they stay clear
	 * (bounded at 100 tries).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* No PCS sync and autoneg timed out: pulse SEND_CONFIGS,
		 * presumably to kick the partner's autoneg - confirm.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links only ever come up as 1000/full. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Propagate link state to the net device and log changes. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2890
/* tg3_setup_fiber_mii_phy - fiber link setup for parts whose SerDes is
 * driven through MII-style registers (special-cased for 5714).
 * @tp:          device instance
 * @force_reset: when nonzero, reset the PHY before negotiating
 *
 * Three negotiation paths:
 *  - parallel-detect already active (and no forced reset): do nothing
 *    and just evaluate link state at the end;
 *  - autoneg enabled: rebuild the 1000X advertisement word and, if it
 *    (or the autoneg-enable bit) changed, restart autoneg and return
 *    early with a fresh serdes timeout;
 *  - forced mode: build the target BMCR, force a linkdown first if the
 *    carrier is up, then write the new BMCR.
 * Finally the BMSR link bit decides link state, duplex is resolved
 * from BMCR / the negotiated common abilities, and carrier state is
 * updated.
 *
 * Returns the OR of all tg3_readphy() error codes (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any latched MAC status events before probing. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR is read twice: its link bit is latched, so the second
	 * read reflects the current state.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	/* On 5714, trust the MAC's TX status for link, not the PHY. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000X advertisement from link_config. */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* Advertisement changed (or autoneg was off): restart
		 * autoneg and bail out; link will be evaluated on a
		 * later pass.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: disable autoneg, set requested duplex. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Withdraw the 1000X abilities and
				 * restart autoneg so the partner drops
				 * the link before we force the mode.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-read the (latched) link status after the
			 * forced-mode write; 5714 again overrides from
			 * the MAC TX status.
			 */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		/* With autoneg, refine duplex from the common abilities
		 * and program flow control from the pause bits.
		 */
		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				current_link_up = 0;
		}
	}

	/* NOTE(review): MAC_MODE_HALF_DUPLEX is derived from the
	 * PREVIOUS active_duplex; active_duplex is only updated to
	 * current_duplex below - confirm this ordering is intended.
	 */
	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	/* Propagate link state to the net device and log changes. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
3057
/* Handle parallel detection for serdes devices whose link partner does
 * not autonegotiate.  Once the autoneg grace period (serdes_counter)
 * expires: if we see signal detect but no config code words, force
 * 1000/full and mark parallel-detect mode.  If config code words later
 * reappear while in that mode, turn autoneg back on.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
        if (tp->serdes_counter) {
                /* Give autoneg time to complete. */
                tp->serdes_counter--;
                return;
        }
        if (!netif_carrier_ok(tp->dev) &&
            (tp->link_config.autoneg == AUTONEG_ENABLE)) {
                u32 bmcr;

                tg3_readphy(tp, MII_BMCR, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        u32 phy1, phy2;

                        /* Select shadow register 0x1f */
                        tg3_writephy(tp, 0x1c, 0x7c00);
                        tg3_readphy(tp, 0x1c, &phy1);

                        /* Select expansion interrupt status register */
                        tg3_writephy(tp, 0x17, 0x0f01);
                        /* Read twice: presumably the first read clears a
                         * latched status bit — TODO confirm against PHY
                         * datasheet.
                         */
                        tg3_readphy(tp, 0x15, &phy2);
                        tg3_readphy(tp, 0x15, &phy2);

                        if ((phy1 & 0x10) && !(phy2 & 0x20)) {
                                /* We have signal detect and not receiving
                                 * config code words, link is up by parallel
                                 * detection.
                                 */

                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                tg3_writephy(tp, MII_BMCR, bmcr);
                                tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
                        }
                }
        }
        else if (netif_carrier_ok(tp->dev) &&
                 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
                 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
                u32 phy2;

                /* Select expansion interrupt status register */
                tg3_writephy(tp, 0x17, 0x0f01);
                tg3_readphy(tp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        /* Config code words received, turn on autoneg. */
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

                        tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

                }
        }
}
3115
3116 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3117 {
3118         int err;
3119
3120         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3121                 err = tg3_setup_fiber_phy(tp, force_reset);
3122         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3123                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3124         } else {
3125                 err = tg3_setup_copper_phy(tp, force_reset);
3126         }
3127
3128         if (tp->link_config.active_speed == SPEED_1000 &&
3129             tp->link_config.active_duplex == DUPLEX_HALF)
3130                 tw32(MAC_TX_LENGTHS,
3131                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3132                       (6 << TX_LENGTHS_IPG_SHIFT) |
3133                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3134         else
3135                 tw32(MAC_TX_LENGTHS,
3136                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3137                       (6 << TX_LENGTHS_IPG_SHIFT) |
3138                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3139
3140         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3141                 if (netif_carrier_ok(tp->dev)) {
3142                         tw32(HOSTCC_STAT_COAL_TICKS,
3143                              tp->coal.stats_block_coalesce_usecs);
3144                 } else {
3145                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3146                 }
3147         }
3148
3149         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3150                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3151                 if (!netif_carrier_ok(tp->dev))
3152                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3153                               tp->pwrmgmt_thresh;
3154                 else
3155                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3156                 tw32(PCIE_PWR_MGMT_THRESH, val);
3157         }
3158
3159         return err;
3160 }
3161
3162 /* This is called whenever we suspect that the system chipset is re-
3163  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3164  * is bogus tx completions. We try to recover by setting the
3165  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3166  * in the workqueue.
3167  */
static void tg3_tx_recover(struct tg3 *tp)
{
        /* Recovery is only meaningful if we are not already using the
         * reorder workaround nor the indirect mailbox write method.
         */
        BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
               tp->write32_tx_mbox == tg3_write_indirect_mbox);

        printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
               "mapped I/O cycles to the network device, attempting to "
               "recover. Please report the problem to the driver maintainer "
               "and include system chipset information.\n", tp->dev->name);

        /* Flag the pending recovery under tp->lock; the actual chip reset
         * happens later in the reset_task workqueue.
         */
        spin_lock(&tp->lock);
        tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
        spin_unlock(&tp->lock);
}
3182
3183 static inline u32 tg3_tx_avail(struct tg3 *tp)
3184 {
3185         smp_mb();
3186         return (tp->tx_pending -
3187                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3188 }
3189
3190 /* Tigon3 never reports partial packet sends.  So we do not
3191  * need special logic to handle SKBs that have not had all
3192  * of their frags sent yet, like SunGEM does.
3193  */
/* Reclaim completed TX descriptors between our consumer index and the
 * hardware's consumer index: unmap DMA buffers, free the skbs, and wake
 * the queue if enough space opened up.  Inconsistent ring state triggers
 * tg3_tx_recover().
 */
static void tg3_tx(struct tg3 *tp)
{
        u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
        u32 sw_idx = tp->tx_cons;

        while (sw_idx != hw_idx) {
                struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
                struct sk_buff *skb = ri->skb;
                int i, tx_bug = 0;

                /* A completed slot with no skb means the hardware and
                 * driver disagree about the ring — recover via reset.
                 */
                if (unlikely(skb == NULL)) {
                        tg3_tx_recover(tp);
                        return;
                }

                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(ri, mapping),
                                 skb_headlen(skb),
                                 PCI_DMA_TODEVICE);

                ri->skb = NULL;

                sw_idx = NEXT_TX(sw_idx);

                /* Fragments occupy the slots following the head; only
                 * the head slot carries the skb pointer.
                 */
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        ri = &tp->tx_buffers[sw_idx];
                        if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
                                tx_bug = 1;

                        pci_unmap_page(tp->pdev,
                                       pci_unmap_addr(ri, mapping),
                                       skb_shinfo(skb)->frags[i].size,
                                       PCI_DMA_TODEVICE);

                        sw_idx = NEXT_TX(sw_idx);
                }

                dev_kfree_skb(skb);

                if (unlikely(tx_bug)) {
                        tg3_tx_recover(tp);
                        return;
                }
        }

        tp->tx_cons = sw_idx;

        /* Need to make the tx_cons update visible to tg3_start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that tg3_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* Re-check under the tx lock to avoid racing with a concurrent
         * tg3_start_xmit() stopping the queue.
         */
        if (unlikely(netif_queue_stopped(tp->dev) &&
                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
                netif_tx_lock(tp->dev);
                if (netif_queue_stopped(tp->dev) &&
                    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
                        netif_wake_queue(tp->dev);
                netif_tx_unlock(tp->dev);
        }
}
3257
3258 /* Returns size of skb allocated or < 0 on error.
3259  *
3260  * We only need to fill in the address because the other members
3261  * of the RX descriptor are invariant, see tg3_init_rings.
3262  *
3263  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3264  * posting buffers we only dirty the first cache line of the RX
3265  * descriptor (containing the address).  Whereas for the RX status
3266  * buffers the cpu only reads the last cacheline of the RX descriptor
3267  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3268  */
3269 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3270                             int src_idx, u32 dest_idx_unmasked)
3271 {
3272         struct tg3_rx_buffer_desc *desc;
3273         struct ring_info *map, *src_map;
3274         struct sk_buff *skb;
3275         dma_addr_t mapping;
3276         int skb_size, dest_idx;
3277
3278         src_map = NULL;
3279         switch (opaque_key) {
3280         case RXD_OPAQUE_RING_STD:
3281                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3282                 desc = &tp->rx_std[dest_idx];
3283                 map = &tp->rx_std_buffers[dest_idx];
3284                 if (src_idx >= 0)
3285                         src_map = &tp->rx_std_buffers[src_idx];
3286                 skb_size = tp->rx_pkt_buf_sz;
3287                 break;
3288
3289         case RXD_OPAQUE_RING_JUMBO:
3290                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3291                 desc = &tp->rx_jumbo[dest_idx];
3292                 map = &tp->rx_jumbo_buffers[dest_idx];
3293                 if (src_idx >= 0)
3294                         src_map = &tp->rx_jumbo_buffers[src_idx];
3295                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3296                 break;
3297
3298         default:
3299                 return -EINVAL;
3300         };
3301
3302         /* Do not overwrite any of the map or rp information
3303          * until we are sure we can commit to a new buffer.
3304          *
3305          * Callers depend upon this behavior and assume that
3306          * we leave everything unchanged if we fail.
3307          */
3308         skb = netdev_alloc_skb(tp->dev, skb_size);
3309         if (skb == NULL)
3310                 return -ENOMEM;
3311
3312         skb_reserve(skb, tp->rx_offset);
3313
3314         mapping = pci_map_single(tp->pdev, skb->data,
3315                                  skb_size - tp->rx_offset,
3316                                  PCI_DMA_FROMDEVICE);
3317
3318         map->skb = skb;
3319         pci_unmap_addr_set(map, mapping, mapping);
3320
3321         if (src_map != NULL)
3322                 src_map->skb = NULL;
3323
3324         desc->addr_hi = ((u64)mapping >> 32);
3325         desc->addr_lo = ((u64)mapping & 0xffffffff);
3326
3327         return skb_size;
3328 }
3329
3330 /* We only need to move over in the address because the other
3331  * members of the RX descriptor are invariant.  See notes above
3332  * tg3_alloc_rx_skb for full details.
3333  */
3334 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3335                            int src_idx, u32 dest_idx_unmasked)
3336 {
3337         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3338         struct ring_info *src_map, *dest_map;
3339         int dest_idx;
3340
3341         switch (opaque_key) {
3342         case RXD_OPAQUE_RING_STD:
3343                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3344                 dest_desc = &tp->rx_std[dest_idx];
3345                 dest_map = &tp->rx_std_buffers[dest_idx];
3346                 src_desc = &tp->rx_std[src_idx];
3347                 src_map = &tp->rx_std_buffers[src_idx];
3348                 break;
3349
3350         case RXD_OPAQUE_RING_JUMBO:
3351                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3352                 dest_desc = &tp->rx_jumbo[dest_idx];
3353                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3354                 src_desc = &tp->rx_jumbo[src_idx];
3355                 src_map = &tp->rx_jumbo_buffers[src_idx];
3356                 break;
3357
3358         default:
3359                 return;
3360         };
3361
3362         dest_map->skb = src_map->skb;
3363         pci_unmap_addr_set(dest_map, mapping,
3364                            pci_unmap_addr(src_map, mapping));
3365         dest_desc->addr_hi = src_desc->addr_hi;
3366         dest_desc->addr_lo = src_desc->addr_lo;
3367
3368         src_map->skb = NULL;
3369 }
3370
#if TG3_VLAN_TAG_USED
/* Hand a received, VLAN-tagged skb to the stack via the hw-accel path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
        return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3377
3378 /* The RX ring scheme is composed of multiple rings which post fresh
3379  * buffers to the chip, and one special ring the chip uses to report
3380  * status back to the host.
3381  *
3382  * The special ring reports the status of received packets to the
3383  * host.  The chip does not write into the original descriptor the
3384  * RX buffer was obtained from.  The chip simply takes the original
3385  * descriptor as provided by the host, updates the status and length
3386  * field, then writes this into the next status ring entry.
3387  *
3388  * Each ring the host uses to post buffers to the chip is described
3389  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
3390  * it is first placed into the on-chip ram.  When the packet's length
3391  * is known, it walks down the TG3_BDINFO entries to select the ring.
3392  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3393  * which is within the range of the new packet's length is chosen.
3394  *
3395  * The "separate ring for rx status" scheme may sound queer, but it makes
3396  * sense from a cache coherency perspective.  If only the host writes
3397  * to the buffer post rings, and only the chip writes to the rx status
3398  * rings, then cache lines never move beyond shared-modified state.
3399  * If both the host and chip were to write into the same ring, cache line
3400  * eviction could occur since both entities want it in an exclusive state.
3401  */
/* Process up to @budget packets from the RX return ring, posting fresh
 * or recycled buffers back to the chip as we go.  Returns the number of
 * packets delivered to the stack.  Ring layout is described in the
 * comment block above.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
        u32 work_mask, rx_std_posted = 0;
        u32 sw_idx = tp->rx_rcb_ptr;
        u16 hw_idx;
        int received;

        hw_idx = tp->hw_status->idx[0].rx_producer;
        /*
         * We need to order the read of hw_idx and the read of
         * the opaque cookie.
         */
        rmb();
        work_mask = 0;
        received = 0;
        while (sw_idx != hw_idx && budget > 0) {
                struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
                unsigned int len;
                struct sk_buff *skb;
                dma_addr_t dma_addr;
                u32 opaque_key, desc_idx, *post_ptr;

                /* The opaque cookie tells us which buffer ring and slot
                 * this completion refers to.
                 */
                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
                if (opaque_key == RXD_OPAQUE_RING_STD) {
                        dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
                                                  mapping);
                        skb = tp->rx_std_buffers[desc_idx].skb;
                        post_ptr = &tp->rx_std_ptr;
                        rx_std_posted++;
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
                        dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
                                                  mapping);
                        skb = tp->rx_jumbo_buffers[desc_idx].skb;
                        post_ptr = &tp->rx_jumbo_ptr;
                }
                else {
                        goto next_pkt_nopost;
                }

                work_mask |= opaque_key;

                if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
                    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
                drop_it:
                        tg3_recycle_rx(tp, opaque_key,
                                       desc_idx, *post_ptr);
                drop_it_no_recycle:
                        /* Other statistics kept track of by card. */
                        tp->net_stats.rx_dropped++;
                        goto next_pkt;
                }

                len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

                if (len > RX_COPY_THRESHOLD
                        && tp->rx_offset == 2
                        /* rx_offset != 2 iff this is a 5701 card running
                         * in PCI-X mode [see tg3_get_invariants()] */
                ) {
                        int skb_size;

                        /* Big packet: post a replacement buffer and hand
                         * the original one up to the stack.
                         */
                        skb_size = tg3_alloc_rx_skb(tp, opaque_key,
                                                    desc_idx, *post_ptr);
                        if (skb_size < 0)
                                goto drop_it;

                        pci_unmap_single(tp->pdev, dma_addr,
                                         skb_size - tp->rx_offset,
                                         PCI_DMA_FROMDEVICE);

                        skb_put(skb, len);
                } else {
                        struct sk_buff *copy_skb;

                        /* Small packet: copy it into a fresh skb and
                         * recycle the original ring buffer.
                         */
                        tg3_recycle_rx(tp, opaque_key,
                                       desc_idx, *post_ptr);

                        copy_skb = netdev_alloc_skb(tp->dev, len + 2);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
                        skb_copy_from_linear_data(skb, copy_skb->data, len);
                        pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

                        /* We'll reuse the original ring buffer. */
                        skb = copy_skb;
                }

                /* Trust the hardware checksum only when the chip flagged
                 * the packet and the computed csum is 0xffff.
                 */
                if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
                    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
                    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
                      >> RXD_TCPCSUM_SHIFT) == 0xffff))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb->ip_summed = CHECKSUM_NONE;

                skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
                if (tp->vlgrp != NULL &&
                    desc->type_flags & RXD_FLAG_VLAN) {
                        tg3_vlan_rx(tp, skb,
                                    desc->err_vlan & RXD_VLAN_MASK);
                } else
#endif
                        netif_receive_skb(skb);

                tp->dev->last_rx = jiffies;
                received++;
                budget--;

next_pkt:
                (*post_ptr)++;

                /* Periodically tell the chip about newly posted std-ring
                 * buffers so it does not run dry on long bursts.
                 */
                if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
                        u32 idx = *post_ptr % TG3_RX_RING_SIZE;

                        tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
                                     TG3_64BIT_REG_LOW, idx);
                        work_mask &= ~RXD_OPAQUE_RING_STD;
                        rx_std_posted = 0;
                }
next_pkt_nopost:
                sw_idx++;
                sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

                /* Refresh hw_idx to see if there is new work */
                if (sw_idx == hw_idx) {
                        hw_idx = tp->hw_status->idx[0].rx_producer;
                        rmb();
                }
        }

        /* ACK the status ring. */
        tp->rx_rcb_ptr = sw_idx;
        tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

        /* Refill RX ring(s). */
        if (work_mask & RXD_OPAQUE_RING_STD) {
                sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
                tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
                             sw_idx);
        }
        if (work_mask & RXD_OPAQUE_RING_JUMBO) {
                sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
                tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
                             sw_idx);
        }
        mmiowb();

        return received;
}
3557
/* One pass of NAPI work: handle link-change events, reap TX completions,
 * then run RX within the remaining budget.  Returns the updated
 * work_done count; bails out early if a TX recovery was scheduled.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
        struct tg3_hw_status *sblk = tp->hw_status;

        /* handle link change and other phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG) {
                        /* Clear the link-change bit before re-running
                         * PHY setup under tp->lock.
                         */
                        sblk->status = SD_STATUS_UPDATED |
                                (sblk->status & ~SD_STATUS_LINK_CHG);
                        spin_lock(&tp->lock);
                        tg3_setup_phy(tp, 0);
                        spin_unlock(&tp->lock);
                }
        }

        /* run TX completion thread */
        if (sblk->idx[0].tx_consumer != tp->tx_cons) {
                tg3_tx(tp);
                if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
                        return work_done;
        }

        /* run RX thread, within the bounds set by NAPI.
         * All RX "locking" is done by ensuring outside
         * code synchronizes with tg3->napi.poll()
         */
        if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
                work_done += tg3_rx(tp, budget - work_done);

        return work_done;
}
3591
/* NAPI poll callback.  Loops over tg3_poll_work() until either the
 * budget is exhausted (stay scheduled) or no work remains (complete
 * NAPI and re-enable interrupts).  A pending TX recovery punts to the
 * reset_task workqueue instead.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
        struct tg3 *tp = container_of(napi, struct tg3, napi);
        int work_done = 0;
        struct tg3_hw_status *sblk = tp->hw_status;

        while (1) {
                work_done = tg3_poll_work(tp, work_done, budget);

                if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
                        goto tx_recovery;

                if (unlikely(work_done >= budget))
                        break;

                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
                        /* tp->last_tag is used in tg3_restart_ints() below
                         * to tell the hw how much work has been processed,
                         * so we must read it before checking for more work.
                         */
                        tp->last_tag = sblk->status_tag;
                        rmb();
                } else
                        sblk->status &= ~SD_STATUS_UPDATED;

                if (likely(!tg3_has_work(tp))) {
                        netif_rx_complete(tp->dev, napi);
                        tg3_restart_ints(tp);
                        break;
                }
        }

        return work_done;

tx_recovery:
        /* work_done is guaranteed to be less than budget. */
        netif_rx_complete(tp->dev, napi);
        schedule_work(&tp->reset_task);
        return work_done;
}
3632
/* Stop the IRQ handler from doing work: set irq_sync (which the
 * handlers check) and wait for any in-flight handler to finish.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
        BUG_ON(tp->irq_sync);

        tp->irq_sync = 1;
        /* Make the flag visible before waiting on the handler. */
        smp_mb();

        synchronize_irq(tp->pdev->irq);
}
3642
/* Non-zero while interrupts are quiesced via tg3_irq_quiesce(). */
static inline int tg3_irq_sync(struct tg3 *tp)
{
        return tp->irq_sync;
}
3647
3648 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3649  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3650  * with as well.  Most of the time, this is not necessary except when
3651  * shutting down the device.
3652  */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
        spin_lock_bh(&tp->lock);
        /* Optionally quiesce the IRQ handler too (see comment above). */
        if (irq_sync)
                tg3_irq_quiesce(tp);
}
3659
/* Counterpart of tg3_full_lock().  Note it does not clear irq_sync. */
static inline void tg3_full_unlock(struct tg3 *tp)
{
        spin_unlock_bh(&tp->lock);
}
3664
3665 /* One-shot MSI handler - Chip automatically disables interrupt
3666  * after sending MSI so driver doesn't have to do it.
3667  */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);

        /* Warm the cache lines the poll handler will touch first. */
        prefetch(tp->hw_status);
        prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

        /* Skip scheduling NAPI while interrupts are being quiesced. */
        if (likely(!tg3_irq_sync(tp)))
                netif_rx_schedule(dev, &tp->napi);

        return IRQ_HANDLED;
}
3681
3682 /* MSI ISR - No need to check for interrupt sharing and no need to
3683  * flush status block and interrupt mailbox. PCI ordering rules
3684  * guarantee that MSI will arrive after the status block.
3685  */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);

        /* Warm the cache lines the poll handler will touch first. */
        prefetch(tp->hw_status);
        prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         */
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (likely(!tg3_irq_sync(tp)))
                netif_rx_schedule(dev, &tp->napi);

        return IRQ_RETVAL(1);
}
3706
/* INTx interrupt handler for chips without tagged status.  Confirms the
 * interrupt is ours, acks/disables chip interrupts via the mailbox, and
 * schedules NAPI if there is work; otherwise re-enables interrupts
 * (possible shared-IRQ case).
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
                if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
                }
        }

        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
        sblk->status &= ~SD_STATUS_UPDATED;
        if (likely(tg3_has_work(tp))) {
                prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
                netif_rx_schedule(dev, &tp->napi);
        } else {
                /* No work, shared interrupt perhaps?  re-enable
                 * interrupts, and flush that PCI write
                 */
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               0x00000000);
        }
out:
        return IRQ_RETVAL(handled);
}
3755
/* INTx interrupt handler for chips using tagged status blocks: a new
 * status_tag (vs. tp->last_tag) indicates fresh work.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(sblk->status_tag == tp->last_tag)) {
                if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
                }
        }

        /*
         * writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
        if (netif_rx_schedule_prep(dev, &tp->napi)) {
                prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
                /* Update last_tag to mark that this status has been
                 * seen. Because interrupt may be shared, we may be
                 * racing with tg3_poll(), so only update last_tag
                 * if tg3_poll() is not scheduled.
                 */
                tp->last_tag = sblk->status_tag;
                __netif_rx_schedule(dev, &tp->napi);
        }
out:
        return IRQ_RETVAL(handled);
}
3803
3804 /* ISR for interrupt test */
3805 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
3806 {
3807         struct net_device *dev = dev_id;
3808         struct tg3 *tp = netdev_priv(dev);
3809         struct tg3_hw_status *sblk = tp->hw_status;
3810
3811         if ((sblk->status & SD_STATUS_UPDATED) ||
3812             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3813                 tg3_disable_ints(tp);
3814                 return IRQ_RETVAL(1);
3815         }
3816         return IRQ_RETVAL(0);
3817 }
3818
3819 static int tg3_init_hw(struct tg3 *, int);
3820 static int tg3_halt(struct tg3 *, int, int);
3821
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * Returns 0 on success or the negative error from tg3_init_hw().
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		/* Re-init failed: shut the chip down and close the
		 * device.  dev_close() cannot be called with tp->lock
		 * held, so drop the lock around it and re-acquire
		 * afterwards to honor this function's locking contract.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		/* NAPI must be re-enabled before dev_close() so its own
		 * teardown path can disable it again.
		 */
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
3843
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' entry point (netpoll/netconsole): invoke the
 * normal ISR by hand so the device can make progress with hardware
 * interrupts unavailable.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
3852
/* Deferred reset handler, scheduled from tg3_tx_timeout() (and other
 * error paths via tp->reset_task).  Fully reinitializes the chip.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* Device was closed between the schedule and now; nothing to do. */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	/* tg3_netif_stop() may sleep, so it must run unlocked. */
	tg3_full_unlock(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* Latch and clear the restart-timer request before resetting. */
	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		/* TX hung with posted mailbox writes: fall back to
		 * flushing mailbox writes from here on.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);
}
3893
/* Dump a minimal set of MAC/DMA status registers to the log; used by
 * tg3_tx_timeout() to aid post-mortem debugging of TX hangs.
 */
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
3901
/* net_device TX watchdog callback: log (if enabled) and schedule a
 * full chip reset via the deferred reset_task.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	/* Reset must not run in this context; defer to process context. */
	schedule_work(&tp->reset_task);
}
3914
3915 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3916 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3917 {
3918         u32 base = (u32) mapping & 0xffffffff;
3919
3920         return ((base > 0xffffdcc0) &&
3921                 (base + len + 8 < base));
3922 }
3923
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	/* Only chips with the 40-bit DMA erratum care; the check is
	 * compiled in only where >40-bit bus addresses can occur.
	 */
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return (((u64) mapping + len) > DMA_40BIT_MASK);
	return 0;
#else
	return 0;
#endif
}
3936
3937 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3938
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Linearize 'skb' into a fresh copy, re-queue that copy at *start, and
 * unmap/clear the original descriptors from *start up to (but not
 * including) last_plus_one.  On success *start is advanced past the
 * replacement descriptor.  Returns 0 on success, -1 if the copy or its
 * mapping is unusable (caller silently drops the packet).
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries.  Entry 0 was the linear
	 * head (unmapped with skb_headlen); entries 1..n were the page
	 * frags of the original skb.
	 * NOTE(review): frags were mapped with pci_map_page() in the
	 * caller but are unmapped here with pci_unmap_single() — looks
	 * suspect; confirm against the DMA-API rules for this kernel.
	 */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			/* First slot now owns the replacement skb (NULL
			 * if the workaround failed above).
			 */
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	/* Original skb is no longer referenced by the ring. */
	dev_kfree_skb(skb);

	return ret;
}
3996
3997 static void tg3_set_txd(struct tg3 *tp, int entry,
3998                         dma_addr_t mapping, int len, u32 flags,
3999                         u32 mss_and_is_end)
4000 {
4001         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4002         int is_end = (mss_and_is_end & 0x1);
4003         u32 mss = (mss_and_is_end >> 1);
4004         u32 vlan_tag = 0;
4005
4006         if (is_end)
4007                 flags |= TXD_FLAG_END;
4008         if (flags & TXD_FLAG_VLAN) {
4009                 vlan_tag = flags >> 16;
4010                 flags &= 0xffff;
4011         }
4012         vlan_tag |= (mss << TXD_MSS_SHIFT);
4013
4014         txd->addr_hi = ((u64) mapping >> 32);
4015         txd->addr_lo = ((u64) mapping & 0xffffffff);
4016         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4017         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4018 }
4019
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 *
 * Returns NETDEV_TX_OK (packet consumed or dropped) or NETDEV_TX_BUSY
 * when the ring lacks room for all fragments.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* TSO path: headers will be modified below, so make
		 * sure we own them.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		/* Encode the header length into the upper bits of mss
		 * (hardware TSO_2 format).
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Hardware recomputes these per segment; seed
			 * the IP header with per-segment totals.
			 */
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		/* Hardware fills in the TCP checksum for TSO frames. */
		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			/* Only the first slot holds the skb pointer. */
			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		/* Re-check after stopping to close the race with the
		 * TX-completion path waking the queue.
		 */
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4138
4139 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4140
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Splits 'skb' into individual segments in software and feeds each
 * through tg3_start_xmit_dma_bug().  Always consumes 'skb'.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
		netif_stop_queue(tp->dev);
		if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Segment in software with TSO masked off the feature set. */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (unlikely(IS_ERR(segs)))
		goto tg3_tso_bug_end;

	/* Walk the segment list, detaching and transmitting each one. */
	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
4173
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 *
 * Like tg3_start_xmit(), but every DMA mapping is checked against the
 * 4GB-boundary and 40-bit address errata; affected frames are re-queued
 * through tigon3_dma_hwbug_workaround().
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* TSO path: headers will be modified below. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = tcp_optlen(skb);
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		/* Headers over 80 bytes trip a TSO erratum on some
		 * chips; fall back to software GSO for those frames.
		 */
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			/* Hardware TSO computes the TCP checksum. */
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			/* Firmware TSO wants the pseudo-header checksum
			 * pre-seeded.
			 */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Option-length encoding differs per chip family:
		 * shifted into mss for HW_TSO/5705, into base_flags
		 * otherwise.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			/* Flag (don't bail yet): the whole frame is
			 * queued first, then rewritten if needed.
			 */
			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the first descriptor of this frame. */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4347
4348 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4349                                int new_mtu)
4350 {
4351         dev->mtu = new_mtu;
4352
4353         if (new_mtu > ETH_DATA_LEN) {
4354                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4355                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4356                         ethtool_op_set_tso(dev, 0);
4357                 }
4358                 else
4359                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4360         } else {
4361                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4362                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4363                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4364         }
4365 }
4366
/* net_device change_mtu callback.  Validates the requested MTU and, if
 * the interface is up, restarts the chip with the new setting.
 * Returns 0, -EINVAL for an out-of-range MTU, or the error from
 * tg3_restart_hw().
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	/* Quiesce traffic, then reset and restart with the new MTU. */
	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return err;
}
4400
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Standard RX ring: unmap and free every posted buffer. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Jumbo RX ring: same, with the jumbo buffer size. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* TX ring: each frame occupies 1 + nr_frags consecutive slots;
	 * only the first slot holds the skb pointer, so advance 'i'
	 * past the whole frame once the head slot is found.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			/* Mask the index: a frame may wrap the ring. */
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
4472
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM if not even one standard (or, when
 * jumbo frames are enabled, jumbo) RX buffer could be allocated.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips use jumbo-sized buffers in the standard
	 * ring when the MTU exceeds the standard Ethernet payload.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		/* opaque echoes ring id + index back in completions. */
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring.  On partial
	 * failure, shrink the pending count and continue.
	 */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					/* Std buffers were allocated
					 * above; release them too.
					 */
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
4562
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * Releases every DMA-consistent area and the software ring-info
 * arrays allocated by tg3_alloc_consistent().  Each pointer is
 * NULLed after freeing so the function is safe to call on a
 * partially-allocated state (the error path of the allocator).
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	/* One kzalloc covers std/jumbo/tx info arrays (see allocator). */
	kfree(tp->rx_std_buffers);
	tp->rx_std_buffers = NULL;
	if (tp->rx_std) {
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
				    tp->rx_std, tp->rx_std_mapping);
		tp->rx_std = NULL;
	}
	if (tp->rx_jumbo) {
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
				    tp->rx_jumbo, tp->rx_jumbo_mapping);
		tp->rx_jumbo = NULL;
	}
	if (tp->rx_rcb) {
		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
				    tp->rx_rcb, tp->rx_rcb_mapping);
		tp->rx_rcb = NULL;
	}
	if (tp->tx_ring) {
		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
			tp->tx_ring, tp->tx_desc_mapping);
		tp->tx_ring = NULL;
	}
	if (tp->hw_status) {
		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
				    tp->hw_status, tp->status_mapping);
		tp->hw_status = NULL;
	}
	if (tp->hw_stats) {
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
4602
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates the software ring-info arrays plus all DMA-consistent
 * descriptor rings and status/stats blocks.  Returns 0 or -ENOMEM;
 * on failure everything already allocated is released via
 * tg3_free_consistent().
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	/* Single allocation for std RX, jumbo RX and TX ring-info
	 * arrays; the sub-arrays are carved out just below.
	 */
	tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
				      (TG3_RX_RING_SIZE +
				       TG3_RX_JUMBO_RING_SIZE)) +
				     (sizeof(struct tx_ring_info) *
				      TG3_TX_RING_SIZE),
				     GFP_KERNEL);
	if (!tp->rx_std_buffers)
		return -ENOMEM;

	tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
	tp->tx_buffers = (struct tx_ring_info *)
		&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
					  &tp->rx_std_mapping);
	if (!tp->rx_std)
		goto err_out;

	tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
					    &tp->rx_jumbo_mapping);

	if (!tp->rx_jumbo)
		goto err_out;

	tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
					  &tp->rx_rcb_mapping);
	if (!tp->rx_rcb)
		goto err_out;

	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
					   &tp->tx_desc_mapping);
	if (!tp->tx_ring)
		goto err_out;

	tp->hw_status = pci_alloc_consistent(tp->pdev,
					     TG3_HW_STATUS_SIZE,
					     &tp->status_mapping);
	if (!tp->hw_status)
		goto err_out;

	tp->hw_stats = pci_alloc_consistent(tp->pdev,
					    sizeof(struct tg3_hw_stats),
					    &tp->stats_mapping);
	if (!tp->hw_stats)
		goto err_out;

	/* The chip and driver both read these before first write. */
	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
4664
4665 #define MAX_WAIT_CNT 1000
4666
4667 /* To stop a block, clear the enable bit and poll till it
4668  * clears.  tp->lock is held.
4669  */
4670 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4671 {
4672         unsigned int i;
4673         u32 val;
4674
4675         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4676                 switch (ofs) {
4677                 case RCVLSC_MODE:
4678                 case DMAC_MODE:
4679                 case MBFREE_MODE:
4680                 case BUFMGR_MODE:
4681                 case MEMARB_MODE:
4682                         /* We can't enable/disable these bits of the
4683                          * 5705/5750, just say success.
4684                          */
4685                         return 0;
4686
4687                 default:
4688                         break;
4689                 };
4690         }
4691
4692         val = tr32(ofs);
4693         val &= ~enable_bit;
4694         tw32_f(ofs, val);
4695
4696         for (i = 0; i < MAX_WAIT_CNT; i++) {
4697                 udelay(100);
4698                 val = tr32(ofs);
4699                 if ((val & enable_bit) == 0)
4700                         break;
4701         }
4702
4703         if (i == MAX_WAIT_CNT && !silent) {
4704                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4705                        "ofs=%lx enable_bit=%x\n",
4706                        ofs, enable_bit);
4707                 return -ENODEV;
4708         }
4709
4710         return 0;
4711 }
4712
/* tp->lock is held.
 *
 * Shuts down all receive/transmit/DMA engines in dependency order so
 * the chip can be safely reset.  Returns 0 on success, or the OR of
 * the individual tg3_stop_block() / timeout errors.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting new frames at the MAC first. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-side blocks.  Errors are accumulated, not
	 * short-circuited, so every block gets a stop attempt.
	 */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Transmit-side and DMA blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* MAC transmitter has no tg3_stop_block() helper; poll it
	 * by hand with the same MAX_WAIT_CNT * 100us budget.
	 */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	/* Host coalescing and memory-side blocks last. */
	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear the shared status/stats blocks if they exist. */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4775
4776 /* tp->lock is held. */
4777 static int tg3_nvram_lock(struct tg3 *tp)
4778 {
4779         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4780                 int i;
4781
4782                 if (tp->nvram_lock_cnt == 0) {
4783                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4784                         for (i = 0; i < 8000; i++) {
4785                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4786                                         break;
4787                                 udelay(20);
4788                         }
4789                         if (i == 8000) {
4790                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4791                                 return -ENODEV;
4792                         }
4793                 }
4794                 tp->nvram_lock_cnt++;
4795         }
4796         return 0;
4797 }
4798
4799 /* tp->lock is held. */
4800 static void tg3_nvram_unlock(struct tg3 *tp)
4801 {
4802         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4803                 if (tp->nvram_lock_cnt > 0)
4804                         tp->nvram_lock_cnt--;
4805                 if (tp->nvram_lock_cnt == 0)
4806                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4807         }
4808 }
4809
4810 /* tp->lock is held. */
4811 static void tg3_enable_nvram_access(struct tg3 *tp)
4812 {
4813         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4814             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4815                 u32 nvaccess = tr32(NVRAM_ACCESS);
4816
4817                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4818         }
4819 }
4820
4821 /* tp->lock is held. */
4822 static void tg3_disable_nvram_access(struct tg3 *tp)
4823 {
4824         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4825             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4826                 u32 nvaccess = tr32(NVRAM_ACCESS);
4827
4828                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4829         }
4830 }
4831
/* Post an event to the APE management firmware and ring its doorbell.
 * Silently does nothing if the APE segment is absent, its firmware is
 * not ready, or the previous event is never consumed.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* Bail out unless the APE shared-memory segment is present... */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	/* ...and its firmware reports ready. */
	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (apedata != APE_FW_STATUS_READY)
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Previous event consumed: post ours (PENDING set) while
		 * still holding the memory lock.
		 */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the doorbell only if our event was actually posted
	 * (i.e. the pending flag cleared within the wait budget).
	 */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
4867
4868 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
4869 {
4870         u32 event;
4871         u32 apedata;
4872
4873         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
4874                 return;
4875
4876         switch (kind) {
4877                 case RESET_KIND_INIT:
4878                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
4879                                         APE_HOST_SEG_SIG_MAGIC);
4880                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
4881                                         APE_HOST_SEG_LEN_MAGIC);
4882                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
4883                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
4884                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
4885                                         APE_HOST_DRIVER_ID_MAGIC);
4886                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
4887                                         APE_HOST_BEHAV_NO_PHYLOCK);
4888
4889                         event = APE_EVENT_STATUS_STATE_START;
4890                         break;
4891                 case RESET_KIND_SHUTDOWN:
4892                         event = APE_EVENT_STATUS_STATE_UNLOAD;
4893                         break;
4894                 case RESET_KIND_SUSPEND:
4895                         event = APE_EVENT_STATUS_STATE_SUSPEND;
4896                         break;
4897                 default:
4898                         return;
4899         }
4900
4901         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
4902
4903         tg3_ape_send_event(tp, event);
4904 }
4905
4906 /* tp->lock is held. */
4907 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4908 {
4909         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4910                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4911
4912         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4913                 switch (kind) {
4914                 case RESET_KIND_INIT:
4915                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4916                                       DRV_STATE_START);
4917                         break;
4918
4919                 case RESET_KIND_SHUTDOWN:
4920                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4921                                       DRV_STATE_UNLOAD);
4922                         break;
4923
4924                 case RESET_KIND_SUSPEND:
4925                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4926                                       DRV_STATE_SUSPEND);
4927                         break;
4928
4929                 default:
4930                         break;
4931                 };
4932         }
4933
4934         if (kind == RESET_KIND_INIT ||
4935             kind == RESET_KIND_SUSPEND)
4936                 tg3_ape_driver_state_change(tp, kind);
4937 }
4938
4939 /* tp->lock is held. */
4940 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4941 {
4942         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4943                 switch (kind) {
4944                 case RESET_KIND_INIT:
4945                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4946                                       DRV_STATE_START_DONE);
4947                         break;
4948
4949                 case RESET_KIND_SHUTDOWN:
4950                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4951                                       DRV_STATE_UNLOAD_DONE);
4952                         break;
4953
4954                 default:
4955                         break;
4956                 };
4957         }
4958
4959         if (kind == RESET_KIND_SHUTDOWN)
4960                 tg3_ape_driver_state_change(tp, kind);
4961 }
4962
4963 /* tp->lock is held. */
4964 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4965 {
4966         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4967                 switch (kind) {
4968                 case RESET_KIND_INIT:
4969                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4970                                       DRV_STATE_START);
4971                         break;
4972
4973                 case RESET_KIND_SHUTDOWN:
4974                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4975                                       DRV_STATE_UNLOAD);
4976                         break;
4977
4978                 case RESET_KIND_SUSPEND:
4979                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4980                                       DRV_STATE_SUSPEND);
4981                         break;
4982
4983                 default:
4984                         break;
4985                 };
4986         }
4987 }
4988
4989 static int tg3_poll_fw(struct tg3 *tp)
4990 {
4991         int i;
4992         u32 val;
4993
4994         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
4995                 /* Wait up to 20ms for init done. */
4996                 for (i = 0; i < 200; i++) {
4997                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
4998                                 return 0;
4999                         udelay(100);
5000                 }
5001                 return -ENODEV;
5002         }
5003
5004         /* Wait for firmware initialization to complete. */
5005         for (i = 0; i < 100000; i++) {
5006                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5007                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5008                         break;
5009                 udelay(10);
5010         }
5011
5012         /* Chip might not be fitted with firmware.  Some Sun onboard
5013          * parts are configured like that.  So don't signal the timeout
5014          * of the above loop as an error, but do report the lack of
5015          * running firmware once.
5016          */
5017         if (i >= 100000 &&
5018             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5019                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5020
5021                 printk(KERN_INFO PFX "%s: No firmware running.\n",
5022                        tp->dev->name);
5023         }
5024
5025         return 0;
5026 }
5027
5028 /* Save PCI command register before chip reset */
5029 static void tg3_save_pci_state(struct tg3 *tp)
5030 {
5031         u32 val;
5032
5033         pci_read_config_dword(tp->pdev, TG3PCI_COMMAND, &val);
5034         tp->pci_cmd = val;
5035 }
5036
/* Restore PCI state after chip reset.
 *
 * Re-enables indirect access, restores the saved PCI command register,
 * clears PCI-X relaxed ordering, and re-enables MSI on 5780-class
 * chips (the reset clears the MSI enable bit there).
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Restore the command register saved by tg3_save_pci_state(). */
	pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd);

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->pcix_cap) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
5089
5090 static void tg3_stop_fw(struct tg3 *);
5091
/* tp->lock is held.
 *
 * Performs a full GRC core-clock reset of the chip, then restores the
 * PCI state, memory arbiter, GRC mode, and MAC mode, waits for the
 * on-chip firmware, and re-probes the ASF enable state.  The exact
 * ordering of the register accesses below is hardware-mandated; do
 * not reorder.  Returns 0 on success or the tg3_poll_fw() error.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
	if (tp->hw_status) {
		tp->hw_status->status = 0;
		tp->hw_status->status_tag = 0;
	}
	tp->last_tag = 0;
	smp_mb();
	synchronize_irq(tp->pdev->irq);

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* PCIe-specific pre-reset fixups (undocumented registers). */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	tg3_restore_pci_state(tp);

	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

	/* Re-enable the memory arbiter, preserving its mode on
	 * 5780-class chips.
	 */
	val = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Re-select the MAC port mode for the PHY type in use. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
5280
5281 /* tp->lock is held. */
5282 static void tg3_stop_fw(struct tg3 *tp)
5283 {
5284         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5285            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5286                 u32 val;
5287                 int i;
5288
5289                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5290                 val = tr32(GRC_RX_CPU_EVENT);
5291                 val |= (1 << 14);
5292                 tw32(GRC_RX_CPU_EVENT, val);
5293
5294                 /* Wait for RX cpu to ACK the event.  */
5295                 for (i = 0; i < 100; i++) {
5296                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5297                                 break;
5298                         udelay(1);
5299                 }
5300         }
5301 }
5302
/* tp->lock is held.
 *
 * Stops the firmware, quiesces and resets the chip, and signals the
 * reset to ASF/APE before and after.  Returns the tg3_chip_reset()
 * result (0 on success).
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
5323
5324 #define TG3_FW_RELEASE_MAJOR    0x0
5325 #define TG3_FW_RELASE_MINOR     0x0
5326 #define TG3_FW_RELEASE_FIX      0x0
5327 #define TG3_FW_START_ADDR       0x08000000
5328 #define TG3_FW_TEXT_ADDR        0x08000000
5329 #define TG3_FW_TEXT_LEN         0x9c0
5330 #define TG3_FW_RODATA_ADDR      0x080009c0
5331 #define TG3_FW_RODATA_LEN       0x60
5332 #define TG3_FW_DATA_ADDR        0x08000a40
5333 #define TG3_FW_DATA_LEN         0x20
5334 #define TG3_FW_SBSS_ADDR        0x08000a60
5335 #define TG3_FW_SBSS_LEN         0xc
5336 #define TG3_FW_BSS_ADDR         0x08000a70
5337 #define TG3_FW_BSS_LEN          0x10
5338
5339 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5340         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5341         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5342         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5343         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5344         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5345         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5346         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5347         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5348         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5349         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5350         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5351         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5352         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5353         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5354         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5355         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5356         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5357         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5358         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5359         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5360         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5361         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5362         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5363         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5364         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5365         0, 0, 0, 0, 0, 0,
5366         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5367         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5368         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5369         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5370         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5371         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5372         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5373         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5374         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5375         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5376         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5377         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5378         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5379         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5380         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5381         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5382         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5383         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5384         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5385         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5386         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5387         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5388         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5389         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5390         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5391         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5392         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5393         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5394         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5395         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5396         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5397         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5398         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5399         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5400         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5401         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5402         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5403         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5404         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5405         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5406         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5407         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5408         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5409         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5410         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5411         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5412         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5413         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5414         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5415         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5416         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5417         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5418         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5419         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5420         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5421         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5422         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5423         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5424         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5425         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5426         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5427         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5428         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5429         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5430         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5431 };
5432
/* Read-only data section for the 5701 A0 fix firmware (loaded by
 * tg3_load_5701_a0_firmware_fix()).  The words are ASCII message
 * strings used by the firmware, e.g. 0x53774576 0x656e7430 spells
 * "SwEvent0"; other strings include "fatalErr" and "MainCpuB".
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5440
#if 0 /* All zeros, don't eat up space with it. */
/* The .data section is all zeros; instead of shipping this table,
 * tg3_load_5701_a0_firmware_fix() passes data_data == NULL and
 * tg3_load_firmware_cpu() writes zeros for the whole section.
 */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
5447
/* On-chip scratch RAM windows into which the RX and TX CPU firmware
 * images are staged by tg3_load_firmware_cpu().
 */
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
5452
5453 /* tp->lock is held. */
5454 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5455 {
5456         int i;
5457
5458         BUG_ON(offset == TX_CPU_BASE &&
5459             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5460
5461         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5462                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5463
5464                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5465                 return 0;
5466         }
5467         if (offset == RX_CPU_BASE) {
5468                 for (i = 0; i < 10000; i++) {
5469                         tw32(offset + CPU_STATE, 0xffffffff);
5470                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5471                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5472                                 break;
5473                 }
5474
5475                 tw32(offset + CPU_STATE, 0xffffffff);
5476                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5477                 udelay(10);
5478         } else {
5479                 for (i = 0; i < 10000; i++) {
5480                         tw32(offset + CPU_STATE, 0xffffffff);
5481                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5482                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5483                                 break;
5484                 }
5485         }
5486
5487         if (i >= 10000) {
5488                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5489                        "and %s CPU\n",
5490                        tp->dev->name,
5491                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5492                 return -ENODEV;
5493         }
5494
5495         /* Clear firmware's nvram arbitration. */
5496         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5497                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5498         return 0;
5499 }
5500
/* Describes a firmware image to be loaded by tg3_load_firmware_cpu():
 * the target address, byte length and backing table for each of the
 * .text, .rodata and .data sections.  A NULL *_data pointer makes the
 * loader zero-fill that section instead of copying from a table.
 */
struct fw_info {
	unsigned int text_base;		/* load address of .text */
	unsigned int text_len;		/* .text length in bytes */
	const u32 *text_data;		/* .text words, or NULL to zero-fill */
	unsigned int rodata_base;	/* load address of .rodata */
	unsigned int rodata_len;	/* .rodata length in bytes */
	const u32 *rodata_data;		/* .rodata words, or NULL to zero-fill */
	unsigned int data_base;		/* load address of .data */
	unsigned int data_len;		/* .data length in bytes */
	const u32 *data_data;		/* .data words, or NULL to zero-fill */
};
5512
5513 /* tp->lock is held. */
5514 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5515                                  int cpu_scratch_size, struct fw_info *info)
5516 {
5517         int err, lock_err, i;
5518         void (*write_op)(struct tg3 *, u32, u32);
5519
5520         if (cpu_base == TX_CPU_BASE &&
5521             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5522                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5523                        "TX cpu firmware on %s which is 5705.\n",
5524                        tp->dev->name);
5525                 return -EINVAL;
5526         }
5527
5528         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5529                 write_op = tg3_write_mem;
5530         else
5531                 write_op = tg3_write_indirect_reg32;
5532
5533         /* It is possible that bootcode is still loading at this point.
5534          * Get the nvram lock first before halting the cpu.
5535          */
5536         lock_err = tg3_nvram_lock(tp);
5537         err = tg3_halt_cpu(tp, cpu_base);
5538         if (!lock_err)
5539                 tg3_nvram_unlock(tp);
5540         if (err)
5541                 goto out;
5542
5543         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5544                 write_op(tp, cpu_scratch_base + i, 0);
5545         tw32(cpu_base + CPU_STATE, 0xffffffff);
5546         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5547         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5548                 write_op(tp, (cpu_scratch_base +
5549                               (info->text_base & 0xffff) +
5550                               (i * sizeof(u32))),
5551                          (info->text_data ?
5552                           info->text_data[i] : 0));
5553         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5554                 write_op(tp, (cpu_scratch_base +
5555                               (info->rodata_base & 0xffff) +
5556                               (i * sizeof(u32))),
5557                          (info->rodata_data ?
5558                           info->rodata_data[i] : 0));
5559         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5560                 write_op(tp, (cpu_scratch_base +
5561                               (info->data_base & 0xffff) +
5562                               (i * sizeof(u32))),
5563                          (info->data_data ?
5564                           info->data_data[i] : 0));
5565
5566         err = 0;
5567
5568 out:
5569         return err;
5570 }
5571
5572 /* tp->lock is held. */
5573 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5574 {
5575         struct fw_info info;
5576         int err, i;
5577
5578         info.text_base = TG3_FW_TEXT_ADDR;
5579         info.text_len = TG3_FW_TEXT_LEN;
5580         info.text_data = &tg3FwText[0];
5581         info.rodata_base = TG3_FW_RODATA_ADDR;
5582         info.rodata_len = TG3_FW_RODATA_LEN;
5583         info.rodata_data = &tg3FwRodata[0];
5584         info.data_base = TG3_FW_DATA_ADDR;
5585         info.data_len = TG3_FW_DATA_LEN;
5586         info.data_data = NULL;
5587
5588         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5589                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5590                                     &info);
5591         if (err)
5592                 return err;
5593
5594         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5595                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5596                                     &info);
5597         if (err)
5598                 return err;
5599
5600         /* Now startup only the RX cpu. */
5601         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5602         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5603
5604         for (i = 0; i < 5; i++) {
5605                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5606                         break;
5607                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5608                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5609                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5610                 udelay(1000);
5611         }
5612         if (i >= 5) {
5613                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5614                        "to set RX CPU PC, is %08x should be %08x\n",
5615                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5616                        TG3_FW_TEXT_ADDR);
5617                 return -ENODEV;
5618         }
5619         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5620         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5621
5622         return 0;
5623 }
5624
5625
/* Version and memory layout of the on-chip TSO firmware image:
 * section load addresses and byte lengths for .text/.rodata/.data,
 * plus the zero-initialized .sbss/.bss regions.
 *
 * NOTE(review): "RELASE" in TG3_TSO_FW_RELASE_MINOR is a typo
 * ("RELEASE"); left as-is because renaming would require updating
 * every user of the macro.
 */
#define TG3_TSO_FW_RELEASE_MAJOR        0x1
#define TG3_TSO_FW_RELASE_MINOR         0x6
#define TG3_TSO_FW_RELEASE_FIX          0x0
#define TG3_TSO_FW_START_ADDR           0x08000000
#define TG3_TSO_FW_TEXT_ADDR            0x08000000
#define TG3_TSO_FW_TEXT_LEN             0x1aa0
#define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
#define TG3_TSO_FW_RODATA_LEN           0x60
#define TG3_TSO_FW_DATA_ADDR            0x08001b20
#define TG3_TSO_FW_DATA_LEN             0x30
#define TG3_TSO_FW_SBSS_ADDR            0x08001b50
#define TG3_TSO_FW_SBSS_LEN             0x2c
#define TG3_TSO_FW_BSS_ADDR             0x08001b80
#define TG3_TSO_FW_BSS_LEN              0x894
5640
5641 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5642         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5643         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5644         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5645         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5646         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5647         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5648         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5649         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5650         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5651         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5652         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5653         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5654         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5655         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5656         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5657         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5658         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5659         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5660         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5661         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5662         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5663         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5664         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5665         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5666         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5667         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5668         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5669         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5670         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5671         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5672         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5673         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5674         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5675         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5676         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5677         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5678         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5679         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5680         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5681         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5682         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5683         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5684         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5685         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5686         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5687         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5688         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5689         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5690         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5691         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5692         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5693         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5694         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5695         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5696         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5697         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5698         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5699         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5700         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5701         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5702         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5703         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5704         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5705         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5706         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5707         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5708         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5709         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5710         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5711         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5712         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5713         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5714         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5715         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5716         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5717         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5718         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5719         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5720         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5721         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5722         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5723         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5724         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5725         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5726         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5727         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5728         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5729         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5730         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5731         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5732         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5733         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5734         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5735         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5736         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5737         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5738         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5739         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5740         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5741         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5742         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5743         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5744         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5745         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5746         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5747         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5748         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5749         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5750         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5751         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5752         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5753         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5754         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5755         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5756         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5757         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5758         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5759         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5760         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5761         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5762         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5763         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5764         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5765         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5766         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5767         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5768         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5769         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5770         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5771         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5772         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5773         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5774         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5775         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5776         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5777         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5778         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5779         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5780         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5781         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5782         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5783         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5784         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5785         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5786         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5787         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5788         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5789         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5790         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5791         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5792         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5793         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5794         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5795         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5796         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5797         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5798         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5799         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5800         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5801         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5802         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5803         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5804         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5805         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5806         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5807         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5808         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5809         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5810         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5811         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5812         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5813         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5814         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5815         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5816         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5817         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5818         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5819         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5820         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5821         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5822         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5823         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5824         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5825         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5826         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5827         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5828         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5829         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5830         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5831         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5832         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5833         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5834         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5835         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5836         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5837         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5838         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5839         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5840         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5841         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5842         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5843         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5844         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5845         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5846         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5847         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5848         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5849         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5850         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5851         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5852         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5853         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5854         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5855         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5856         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5857         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5858         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5859         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5860         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5861         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5862         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5863         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5864         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5865         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5866         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5867         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5868         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5869         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5870         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5871         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5872         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5873         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5874         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5875         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5876         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5877         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5878         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5879         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5880         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5881         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5882         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5883         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5884         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5885         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5886         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5887         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5888         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5889         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5890         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5891         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5892         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5893         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5894         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5895         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5896         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5897         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5898         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5899         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5900         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5901         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5902         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5903         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5904         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5905         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5906         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5907         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5908         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5909         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5910         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5911         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5912         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5913         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5914         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5915         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5916         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5917         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5918         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5919         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5920         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5921         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5922         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5923         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5924         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5925         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5926 };
5927
/* Read-only data segment of the standard TSO firmware image.  The words
 * are ASCII text used by the firmware itself (e.g. "MainCpuB", "MainCpuA",
 * "stkoffld", "SwEvent0", "fatalErr") — do not edit by hand.
 */
static const u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
5935
/* Initialized data segment of the standard TSO firmware image.  Contains
 * the firmware version string "stkoffld_v1.6.0" — do not edit by hand.
 */
static const u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
5941
/* 5705 needs a special version of the TSO firmware.  */
#define TG3_TSO5_FW_RELEASE_MAJOR       0x1
#define TG3_TSO5_FW_RELASE_MINOR        0x2     /* NOTE(review): "RELASE" typo; left as-is in case it is referenced elsewhere */
#define TG3_TSO5_FW_RELEASE_FIX         0x0
/* NIC SRAM layout of the TSO5 image: text, rodata, data, sbss and bss
 * sections laid out back to back (with alignment padding) from 0x00010000.
 */
#define TG3_TSO5_FW_START_ADDR          0x00010000
#define TG3_TSO5_FW_TEXT_ADDR           0x00010000
#define TG3_TSO5_FW_TEXT_LEN            0xe90
#define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
#define TG3_TSO5_FW_RODATA_LEN          0x50
#define TG3_TSO5_FW_DATA_ADDR           0x00010f00
#define TG3_TSO5_FW_DATA_LEN            0x20
#define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
#define TG3_TSO5_FW_SBSS_LEN            0x28
#define TG3_TSO5_FW_BSS_ADDR            0x00010f50
#define TG3_TSO5_FW_BSS_LEN             0x88
5957
/* Text (code) segment of the 5705-specific TSO firmware image:
 * TG3_TSO5_FW_TEXT_LEN bytes of instruction words plus one terminating
 * word (appears to be MIPS machine code — TODO confirm).  Generated
 * data; do not edit by hand.
 */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
	0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
	0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
	0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
	0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
	0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
	0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
	0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
	0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
	0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
	0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
	0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
	0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
	0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
	0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
	0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
	0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
	0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
	0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
	0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
	0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
	0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
	0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
	0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
	0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
	0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
	0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
	0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
	0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
	0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
	0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
	0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
	0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
	0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
	0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
	0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
	0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
	0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
	0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
	0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
	0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
	0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
	0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
	0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
	0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
	0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
	0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
	0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
	0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
	0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
	0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
	0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
	0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
	0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
	0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
	0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
	0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
	0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
	0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
	0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
	0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
	0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
	0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
	0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
	0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
	0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
	0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
	0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
	0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
	0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
	0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
	0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
	0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
	0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
	0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
	0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
	0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
	0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
	0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
	0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
	0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
	0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
	0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
	0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
	0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
	0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
	0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
	0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
	0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
	0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
	0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
	0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
	0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
	0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
	0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
	0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
	0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
	0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
	0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
	0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
	0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
	0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
	0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
	0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
	0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
	0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
	0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
	0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
	0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
	0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
	0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
	0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
	0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
	0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
	0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
	0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
	0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
	0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
	0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
	0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
	0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
	0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
	0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
	0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
	0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
	0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
	0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
	0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
	0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
	0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0x00000000, 0x00000000, 0x00000000,
};
6116
/* Read-only data segment of the 5705 TSO firmware image.  The words are
 * ASCII text used by the firmware ("MainCpuB", "MainCpuA", "stkoffld",
 * "fatalErr") — do not edit by hand.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
6123
/* Initialized data segment of the 5705 TSO firmware image.  Contains the
 * firmware version string "stkoffld_v1.2.0" — do not edit by hand.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
6128
/* tp->lock is held. */
/*
 * Download the TSO offload firmware into the appropriate on-chip CPU
 * and start it running.  Chips that do TSO in hardware
 * (TG3_FLG2_HW_TSO) need no firmware, so this is a no-op for them.
 * Returns 0 on success or a negative errno.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* 5705 runs the special TSO5 image on the RX CPU and uses
		 * the MBUF pool region as CPU scratch space; the scratch
		 * size covers all five firmware sections.
		 */
		info.text_base = TG3_TSO5_FW_TEXT_ADDR;
		info.text_len = TG3_TSO5_FW_TEXT_LEN;
		info.text_data = &tg3Tso5FwText[0];
		info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
		info.rodata_data = &tg3Tso5FwRodata[0];
		info.data_base = TG3_TSO5_FW_DATA_ADDR;
		info.data_len = TG3_TSO5_FW_DATA_LEN;
		info.data_data = &tg3Tso5FwData[0];
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
		cpu_scratch_size = (info.text_len +
				    info.rodata_len +
				    info.data_len +
				    TG3_TSO5_FW_SBSS_LEN +
				    TG3_TSO5_FW_BSS_LEN);
	} else {
		/* All other chips run the standard TSO image on the TX CPU. */
		info.text_base = TG3_TSO_FW_TEXT_ADDR;
		info.text_len = TG3_TSO_FW_TEXT_LEN;
		info.text_data = &tg3TsoFwText[0];
		info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO_FW_RODATA_LEN;
		info.rodata_data = &tg3TsoFwRodata[0];
		info.data_base = TG3_TSO_FW_DATA_ADDR;
		info.data_len = TG3_TSO_FW_DATA_LEN;
		info.data_data = &tg3TsoFwData[0];
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC,    info.text_base);

	/* Verify the PC took; re-halt the CPU and rewrite it up to 5 times. */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.text_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC,    info.text_base);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
		       "to set CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.text_base);
		return -ENODEV;
	}
	/* PC is good; clear the halt bit and let the firmware run. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
6200
6201
6202 /* tp->lock is held. */
6203 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6204 {
6205         u32 addr_high, addr_low;
6206         int i;
6207
6208         addr_high = ((tp->dev->dev_addr[0] << 8) |
6209                      tp->dev->dev_addr[1]);
6210         addr_low = ((tp->dev->dev_addr[2] << 24) |
6211                     (tp->dev->dev_addr[3] << 16) |
6212                     (tp->dev->dev_addr[4] <<  8) |
6213                     (tp->dev->dev_addr[5] <<  0));
6214         for (i = 0; i < 4; i++) {
6215                 if (i == 1 && skip_mac_1)
6216                         continue;
6217                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6218                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6219         }
6220
6221         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6222             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6223                 for (i = 0; i < 12; i++) {
6224                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6225                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6226                 }
6227         }
6228
6229         addr_high = (tp->dev->dev_addr[0] +
6230                      tp->dev->dev_addr[1] +
6231                      tp->dev->dev_addr[2] +
6232                      tp->dev->dev_addr[3] +
6233                      tp->dev->dev_addr[4] +
6234                      tp->dev->dev_addr[5]) &
6235                 TX_BACKOFF_SEED_MASK;
6236         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6237 }
6238
6239 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6240 {
6241         struct tg3 *tp = netdev_priv(dev);
6242         struct sockaddr *addr = p;
6243         int err = 0, skip_mac_1 = 0;
6244
6245         if (!is_valid_ether_addr(addr->sa_data))
6246                 return -EINVAL;
6247
6248         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6249
6250         if (!netif_running(dev))
6251                 return 0;
6252
6253         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6254                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6255
6256                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6257                 addr0_low = tr32(MAC_ADDR_0_LOW);
6258                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6259                 addr1_low = tr32(MAC_ADDR_1_LOW);
6260
6261                 /* Skip MAC addr 1 if ASF is using it. */
6262                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6263                     !(addr1_high == 0 && addr1_low == 0))
6264                         skip_mac_1 = 1;
6265         }
6266         spin_lock_bh(&tp->lock);
6267         __tg3_set_mac_addr(tp, skip_mac_1);
6268         spin_unlock_bh(&tp->lock);
6269
6270         return err;
6271 }
6272
6273 /* tp->lock is held. */
6274 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6275                            dma_addr_t mapping, u32 maxlen_flags,
6276                            u32 nic_addr)
6277 {
6278         tg3_write_mem(tp,
6279                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6280                       ((u64) mapping >> 32));
6281         tg3_write_mem(tp,
6282                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6283                       ((u64) mapping & 0xffffffff));
6284         tg3_write_mem(tp,
6285                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6286                        maxlen_flags);
6287
6288         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6289                 tg3_write_mem(tp,
6290                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6291                               nic_addr);
6292 }
6293
6294 static void __tg3_set_rx_mode(struct net_device *);
6295 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6296 {
6297         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6298         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6299         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6300         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6301         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6302                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6303                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6304         }
6305         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6306         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6307         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6308                 u32 val = ec->stats_block_coalesce_usecs;
6309
6310                 if (!netif_carrier_ok(tp->dev))
6311                         val = 0;
6312
6313                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6314         }
6315 }
6316
6317 /* tp->lock is held. */
6318 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6319 {
6320         u32 val, rdmac_mode;
6321         int i, err, limit;
6322
6323         tg3_disable_ints(tp);
6324
6325         tg3_stop_fw(tp);
6326
6327         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6328
6329         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6330                 tg3_abort_hw(tp, 1);
6331         }
6332
6333         if (reset_phy)
6334                 tg3_phy_reset(tp);
6335
6336         err = tg3_chip_reset(tp);
6337         if (err)
6338                 return err;
6339
6340         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6341
6342         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0) {
6343                 val = tr32(TG3_CPMU_CTRL);
6344                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6345                 tw32(TG3_CPMU_CTRL, val);
6346         }
6347
6348         /* This works around an issue with Athlon chipsets on
6349          * B3 tigon3 silicon.  This bit has no effect on any
6350          * other revision.  But do not set this on PCI Express
6351          * chips and don't even touch the clocks if the CPMU is present.
6352          */
6353         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6354                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6355                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6356                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6357         }
6358
6359         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6360             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6361                 val = tr32(TG3PCI_PCISTATE);
6362                 val |= PCISTATE_RETRY_SAME_DMA;
6363                 tw32(TG3PCI_PCISTATE, val);
6364         }
6365
6366         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6367                 /* Allow reads and writes to the
6368                  * APE register and memory space.
6369                  */
6370                 val = tr32(TG3PCI_PCISTATE);
6371                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6372                        PCISTATE_ALLOW_APE_SHMEM_WR;
6373                 tw32(TG3PCI_PCISTATE, val);
6374         }
6375
6376         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6377                 /* Enable some hw fixes.  */
6378                 val = tr32(TG3PCI_MSI_DATA);
6379                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6380                 tw32(TG3PCI_MSI_DATA, val);
6381         }
6382
6383         /* Descriptor ring init may make accesses to the
6384          * NIC SRAM area to setup the TX descriptors, so we
6385          * can only do this after the hardware has been
6386          * successfully reset.
6387          */
6388         err = tg3_init_rings(tp);
6389         if (err)
6390                 return err;
6391
6392         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6393             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6394                 /* This value is determined during the probe time DMA
6395                  * engine test, tg3_test_dma.
6396                  */
6397                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6398         }
6399
6400         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6401                           GRC_MODE_4X_NIC_SEND_RINGS |
6402                           GRC_MODE_NO_TX_PHDR_CSUM |
6403                           GRC_MODE_NO_RX_PHDR_CSUM);
6404         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6405
6406         /* Pseudo-header checksum is done by hardware logic and not
6407          * the offload processers, so make the chip do the pseudo-
6408          * header checksums on receive.  For transmit it is more
6409          * convenient to do the pseudo-header checksum in software
6410          * as Linux does that on transmit for us in all cases.
6411          */
6412         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6413
6414         tw32(GRC_MODE,
6415              tp->grc_mode |
6416              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6417
6418         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6419         val = tr32(GRC_MISC_CFG);
6420         val &= ~0xff;
6421         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6422         tw32(GRC_MISC_CFG, val);
6423
6424         /* Initialize MBUF/DESC pool. */
6425         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6426                 /* Do nothing.  */
6427         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6428                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6429                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6430                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6431                 else
6432                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6433                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6434                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6435         }
6436         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6437                 int fw_len;
6438
6439                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6440                           TG3_TSO5_FW_RODATA_LEN +
6441                           TG3_TSO5_FW_DATA_LEN +
6442                           TG3_TSO5_FW_SBSS_LEN +
6443                           TG3_TSO5_FW_BSS_LEN);
6444                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6445                 tw32(BUFMGR_MB_POOL_ADDR,
6446                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6447                 tw32(BUFMGR_MB_POOL_SIZE,
6448                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6449         }
6450
6451         if (tp->dev->mtu <= ETH_DATA_LEN) {
6452                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6453                      tp->bufmgr_config.mbuf_read_dma_low_water);
6454                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6455                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6456                 tw32(BUFMGR_MB_HIGH_WATER,
6457                      tp->bufmgr_config.mbuf_high_water);
6458         } else {
6459                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6460                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6461                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6462                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6463                 tw32(BUFMGR_MB_HIGH_WATER,
6464                      tp->bufmgr_config.mbuf_high_water_jumbo);
6465         }
6466         tw32(BUFMGR_DMA_LOW_WATER,
6467              tp->bufmgr_config.dma_low_water);
6468         tw32(BUFMGR_DMA_HIGH_WATER,
6469              tp->bufmgr_config.dma_high_water);
6470
6471         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6472         for (i = 0; i < 2000; i++) {
6473                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6474                         break;
6475                 udelay(10);
6476         }
6477         if (i >= 2000) {
6478                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6479                        tp->dev->name);
6480                 return -ENODEV;
6481         }
6482
6483         /* Setup replenish threshold. */
6484         val = tp->rx_pending / 8;
6485         if (val == 0)
6486                 val = 1;
6487         else if (val > tp->rx_std_max_post)
6488                 val = tp->rx_std_max_post;
6489         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6490                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6491                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6492
6493                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6494                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6495         }
6496
6497         tw32(RCVBDI_STD_THRESH, val);
6498
6499         /* Initialize TG3_BDINFO's at:
6500          *  RCVDBDI_STD_BD:     standard eth size rx ring
6501          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6502          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6503          *
6504          * like so:
6505          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6506          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6507          *                              ring attribute flags
6508          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6509          *
6510          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6511          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6512          *
6513          * The size of each ring is fixed in the firmware, but the location is
6514          * configurable.
6515          */
6516         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6517              ((u64) tp->rx_std_mapping >> 32));
6518         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6519              ((u64) tp->rx_std_mapping & 0xffffffff));
6520         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6521              NIC_SRAM_RX_BUFFER_DESC);
6522
6523         /* Don't even try to program the JUMBO/MINI buffer descriptor
6524          * configs on 5705.
6525          */
6526         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6527                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6528                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6529         } else {
6530                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6531                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6532
6533                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6534                      BDINFO_FLAGS_DISABLED);
6535
6536                 /* Setup replenish threshold. */
6537                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6538
6539                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6540                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6541                              ((u64) tp->rx_jumbo_mapping >> 32));
6542                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6543                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6544                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6545                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6546                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6547                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6548                 } else {
6549                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6550                              BDINFO_FLAGS_DISABLED);
6551                 }
6552
6553         }
6554
6555         /* There is only one send ring on 5705/5750, no need to explicitly
6556          * disable the others.
6557          */
6558         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6559                 /* Clear out send RCB ring in SRAM. */
6560                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6561                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6562                                       BDINFO_FLAGS_DISABLED);
6563         }
6564
6565         tp->tx_prod = 0;
6566         tp->tx_cons = 0;
6567         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6568         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6569
6570         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6571                        tp->tx_desc_mapping,
6572                        (TG3_TX_RING_SIZE <<
6573                         BDINFO_FLAGS_MAXLEN_SHIFT),
6574                        NIC_SRAM_TX_BUFFER_DESC);
6575
6576         /* There is only one receive return ring on 5705/5750, no need
6577          * to explicitly disable the others.
6578          */
6579         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6580                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6581                      i += TG3_BDINFO_SIZE) {
6582                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6583                                       BDINFO_FLAGS_DISABLED);
6584                 }
6585         }
6586
6587         tp->rx_rcb_ptr = 0;
6588         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6589
6590         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6591                        tp->rx_rcb_mapping,
6592                        (TG3_RX_RCB_RING_SIZE(tp) <<
6593                         BDINFO_FLAGS_MAXLEN_SHIFT),
6594                        0);
6595
6596         tp->rx_std_ptr = tp->rx_pending;
6597         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6598                      tp->rx_std_ptr);
6599
6600         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6601                                                 tp->rx_jumbo_pending : 0;
6602         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6603                      tp->rx_jumbo_ptr);
6604
6605         /* Initialize MAC address and backoff seed. */
6606         __tg3_set_mac_addr(tp, 0);
6607
6608         /* MTU + ethernet header + FCS + optional VLAN tag */
6609         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6610
6611         /* The slot time is changed by tg3_setup_phy if we
6612          * run at gigabit with half duplex.
6613          */
6614         tw32(MAC_TX_LENGTHS,
6615              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6616              (6 << TX_LENGTHS_IPG_SHIFT) |
6617              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6618
6619         /* Receive rules. */
6620         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6621         tw32(RCVLPC_CONFIG, 0x0181);
6622
6623         /* Calculate RDMAC_MODE setting early, we need it to determine
6624          * the RCVLPC_STATE_ENABLE mask.
6625          */
6626         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6627                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6628                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6629                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6630                       RDMAC_MODE_LNGREAD_ENAB);
6631
6632         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6633                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6634                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6635                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6636
6637         /* If statement applies to 5705 and 5750 PCI devices only */
6638         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6639              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6640             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6641                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6642                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6643                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6644                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6645                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6646                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6647                 }
6648         }
6649
6650         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6651                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6652
6653         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6654                 rdmac_mode |= (1 << 27);
6655
6656         /* Receive/send statistics. */
6657         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6658                 val = tr32(RCVLPC_STATS_ENABLE);
6659                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6660                 tw32(RCVLPC_STATS_ENABLE, val);
6661         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6662                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6663                 val = tr32(RCVLPC_STATS_ENABLE);
6664                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6665                 tw32(RCVLPC_STATS_ENABLE, val);
6666         } else {
6667                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6668         }
6669         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6670         tw32(SNDDATAI_STATSENAB, 0xffffff);
6671         tw32(SNDDATAI_STATSCTRL,
6672              (SNDDATAI_SCTRL_ENABLE |
6673               SNDDATAI_SCTRL_FASTUPD));
6674
6675         /* Setup host coalescing engine. */
6676         tw32(HOSTCC_MODE, 0);
6677         for (i = 0; i < 2000; i++) {
6678                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6679                         break;
6680                 udelay(10);
6681         }
6682
6683         __tg3_set_coalesce(tp, &tp->coal);
6684
6685         /* set status block DMA address */
6686         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6687              ((u64) tp->status_mapping >> 32));
6688         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6689              ((u64) tp->status_mapping & 0xffffffff));
6690
6691         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6692                 /* Status/statistics block address.  See tg3_timer,
6693                  * the tg3_periodic_fetch_stats call there, and
6694                  * tg3_get_stats to see how this works for 5705/5750 chips.
6695                  */
6696                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6697                      ((u64) tp->stats_mapping >> 32));
6698                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6699                      ((u64) tp->stats_mapping & 0xffffffff));
6700                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6701                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6702         }
6703
6704         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6705
6706         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6707         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6708         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6709                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6710
6711         /* Clear statistics/status block in chip, and status block in ram. */
6712         for (i = NIC_SRAM_STATS_BLK;
6713              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6714              i += sizeof(u32)) {
6715                 tg3_write_mem(tp, i, 0);
6716                 udelay(40);
6717         }
6718         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6719
6720         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6721                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6722                 /* reset to prevent losing 1st rx packet intermittently */
6723                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6724                 udelay(10);
6725         }
6726
6727         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6728                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6729         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6730             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6731             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6732                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6733         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6734         udelay(40);
6735
6736         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6737          * If TG3_FLG2_IS_NIC is zero, we should read the
6738          * register to preserve the GPIO settings for LOMs. The GPIOs,
6739          * whether used as inputs or outputs, are set by boot code after
6740          * reset.
6741          */
6742         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6743                 u32 gpio_mask;
6744
6745                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6746                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6747                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6748
6749                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6750                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6751                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6752
6753                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6754                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6755
6756                 tp->grc_local_ctrl &= ~gpio_mask;
6757                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6758
6759                 /* GPIO1 must be driven high for eeprom write protect */
6760                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6761                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6762                                                GRC_LCLCTRL_GPIO_OUTPUT1);
6763         }
6764         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6765         udelay(100);
6766
6767         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6768         tp->last_tag = 0;
6769
6770         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6771                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6772                 udelay(40);
6773         }
6774
6775         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6776                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6777                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6778                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6779                WDMAC_MODE_LNGREAD_ENAB);
6780
6781         /* If statement applies to 5705 and 5750 PCI devices only */
6782         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6783              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6784             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6785                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6786                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6787                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6788                         /* nothing */
6789                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6790                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6791                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6792                         val |= WDMAC_MODE_RX_ACCEL;
6793                 }
6794         }
6795
6796         /* Enable host coalescing bug fix */
6797         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6798             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
6799             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
6800             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
6801                 val |= (1 << 29);
6802
6803         tw32_f(WDMAC_MODE, val);
6804         udelay(40);
6805
6806         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6807                 u16 pcix_cmd;
6808
6809                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6810                                      &pcix_cmd);
6811                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6812                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
6813                         pcix_cmd |= PCI_X_CMD_READ_2K;
6814                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6815                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
6816                         pcix_cmd |= PCI_X_CMD_READ_2K;
6817                 }
6818                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6819                                       pcix_cmd);
6820         }
6821
6822         tw32_f(RDMAC_MODE, rdmac_mode);
6823         udelay(40);
6824
6825         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6826         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6827                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6828
6829         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6830                 tw32(SNDDATAC_MODE,
6831                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
6832         else
6833                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6834
6835         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6836         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6837         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6838         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6839         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6840                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6841         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6842         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6843
6844         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6845                 err = tg3_load_5701_a0_firmware_fix(tp);
6846                 if (err)
6847                         return err;
6848         }
6849
6850         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6851                 err = tg3_load_tso_firmware(tp);
6852                 if (err)
6853                         return err;
6854         }
6855
6856         tp->tx_mode = TX_MODE_ENABLE;
6857         tw32_f(MAC_TX_MODE, tp->tx_mode);
6858         udelay(100);
6859
6860         tp->rx_mode = RX_MODE_ENABLE;
6861         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
6862             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6863                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6864
6865         tw32_f(MAC_RX_MODE, tp->rx_mode);
6866         udelay(10);
6867
6868         if (tp->link_config.phy_is_low_power) {
6869                 tp->link_config.phy_is_low_power = 0;
6870                 tp->link_config.speed = tp->link_config.orig_speed;
6871                 tp->link_config.duplex = tp->link_config.orig_duplex;
6872                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6873         }
6874
6875         tp->mi_mode = MAC_MI_MODE_BASE;
6876         tw32_f(MAC_MI_MODE, tp->mi_mode);
6877         udelay(80);
6878
6879         tw32(MAC_LED_CTRL, tp->led_ctrl);
6880
6881         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6882         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6883                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6884                 udelay(10);
6885         }
6886         tw32_f(MAC_RX_MODE, tp->rx_mode);
6887         udelay(10);
6888
6889         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6890                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6891                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6892                         /* Set drive transmission level to 1.2V  */
6893                         /* only if the signal pre-emphasis bit is not set  */
6894                         val = tr32(MAC_SERDES_CFG);
6895                         val &= 0xfffff000;
6896                         val |= 0x880;
6897                         tw32(MAC_SERDES_CFG, val);
6898                 }
6899                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6900                         tw32(MAC_SERDES_CFG, 0x616000);
6901         }
6902
6903         /* Prevent chip from dropping frames when flow control
6904          * is enabled.
6905          */
6906         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6907
6908         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6909             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6910                 /* Use hardware link auto-negotiation */
6911                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6912         }
6913
6914         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6915             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6916                 u32 tmp;
6917
6918                 tmp = tr32(SERDES_RX_CTRL);
6919                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6920                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6921                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6922                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6923         }
6924
6925         err = tg3_setup_phy(tp, 0);
6926         if (err)
6927                 return err;
6928
6929         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6930             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
6931                 u32 tmp;
6932
6933                 /* Clear CRC stats. */
6934                 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
6935                         tg3_writephy(tp, MII_TG3_TEST1,
6936                                      tmp | MII_TG3_TEST1_CRC_EN);
6937                         tg3_readphy(tp, 0x14, &tmp);
6938                 }
6939         }
6940
6941         __tg3_set_rx_mode(tp->dev);
6942
6943         /* Initialize receive rules. */
6944         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6945         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6946         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6947         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6948
6949         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6950             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6951                 limit = 8;
6952         else
6953                 limit = 16;
6954         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6955                 limit -= 4;
6956         switch (limit) {
6957         case 16:
6958                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6959         case 15:
6960                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6961         case 14:
6962                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6963         case 13:
6964                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6965         case 12:
6966                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6967         case 11:
6968                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6969         case 10:
6970                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6971         case 9:
6972                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6973         case 8:
6974                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6975         case 7:
6976                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6977         case 6:
6978                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6979         case 5:
6980                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6981         case 4:
6982                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6983         case 3:
6984                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6985         case 2:
6986         case 1:
6987
6988         default:
6989                 break;
6990         };
6991
6992         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6993                 /* Write our heartbeat update interval to APE. */
6994                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
6995                                 APE_HOST_HEARTBEAT_INT_DISABLE);
6996
6997         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6998
6999         return 0;
7000 }
7001
7002 /* Called at device open time to get the chip ready for
7003  * packet processing.  Invoked with tp->lock held.
7004  */
7005 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7006 {
7007         int err;
7008
7009         /* Force the chip into D0. */
7010         err = tg3_set_power_state(tp, PCI_D0);
7011         if (err)
7012                 goto out;
7013
7014         tg3_switch_clocks(tp);
7015
7016         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7017
7018         err = tg3_reset_hw(tp, reset_phy);
7019
7020 out:
7021         return err;
7022 }
7023
/* Accumulate the 32-bit hardware statistics register REG (read with
 * tr32()) into the 64-bit { high, low } counter pointed to by PSTAT.
 * The value read is added to ->low; if the addition wrapped (the
 * post-add low word is smaller than the value just added), carry one
 * into ->high.  Wrapped in do { } while (0) so the macro behaves as a
 * single statement in all contexts (e.g. unbraced if/else).
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
7030
7031 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7032 {
7033         struct tg3_hw_stats *sp = tp->hw_stats;
7034
7035         if (!netif_carrier_ok(tp->dev))
7036                 return;
7037
7038         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7039         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7040         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7041         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7042         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7043         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7044         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7045         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7046         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7047         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7048         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7049         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7050         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7051
7052         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7053         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7054         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7055         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7056         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7057         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7058         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7059         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7060         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7061         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7062         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7063         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7064         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7065         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7066
7067         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7068         TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7069         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7070 }
7071
/* Periodic driver timer (runs every tp->timer_offset jiffies).
 *
 * Responsibilities, in order:
 *  - for non-tagged-status chips, kick the interrupt/status-block
 *    handshake and detect a dead write-DMA engine;
 *  - once per second: fetch hardware statistics and poll/maintain
 *    link state depending on PHY/SERDES configuration;
 *  - once per two seconds: send the ASF firmware heartbeat.
 * The timer always re-arms itself before returning.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* An irq_sync sequence is in progress elsewhere; do not touch
	 * the hardware, just re-arm.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			/* Write DMA engine stopped unexpectedly; request
			 * a full chip reset from process context.
			 */
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			/* Detect a link change either via the MI interrupt
			 * status bit or the MAC's link-state-changed bit.
			 */
			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Briefly clear the port mode bits
					 * before reprogramming the PHY.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
7191
7192 static int tg3_request_irq(struct tg3 *tp)
7193 {
7194         irq_handler_t fn;
7195         unsigned long flags;
7196         struct net_device *dev = tp->dev;
7197
7198         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7199                 fn = tg3_msi;
7200                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7201                         fn = tg3_msi_1shot;
7202                 flags = IRQF_SAMPLE_RANDOM;
7203         } else {
7204                 fn = tg3_interrupt;
7205                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7206                         fn = tg3_interrupt_tagged;
7207                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7208         }
7209         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7210 }
7211
/* Verify that the chip can actually deliver an interrupt.
 *
 * Temporarily swaps in a test ISR, forces the host-coalescing engine
 * to raise an interrupt NOW, and polls for up to ~50ms for evidence
 * that it fired (interrupt mailbox written, or PCI INT masked by the
 * ISR).  The normal handler is reinstalled before returning.
 *
 * Returns 0 if an interrupt was seen, -EIO if not, -ENODEV if the
 * device is down, or a negative errno from request_irq().
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force the coalescing engine to generate an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* Either the ISR wrote the mailbox or it masked the
		 * PCI interrupt; both prove the interrupt was taken.
		 */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tp->pdev->irq, dev);

	/* Reinstall the normal interrupt handler. */
	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (intr_ok)
		return 0;

	return -EIO;
}
7265
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.
 *
 * Runs the interrupt self-test while MSI is enabled.  If no MSI
 * interrupt is observed, falls back to legacy INTx: disables MSI,
 * re-requests the IRQ, and resets the chip (an MSI cycle may have
 * terminated with Master Abort, leaving the chip wedged).
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test when not running in MSI mode. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word (SERR state). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
7326
/* net_device open() method: bring the interface up.
 *
 * Sequence: power the chip to D0, allocate DMA-consistent rings,
 * optionally enable MSI, install the IRQ handler, initialize the
 * hardware, set up the periodic driver timer, verify MSI delivery,
 * then enable interrupts and start the TX queue.  Every failure path
 * unwinds exactly what was set up before it.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	netif_carrier_off(tp->dev);

	tg3_full_lock(tp, 0);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		tg3_full_unlock(tp);
		return err;
	}

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			/* Hardware bug - MSI won't work if INTX disabled. */
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				pci_intx(tp->pdev, 1);

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	napi_enable(&tp->napi);

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged status lets us poll once a second; non-tagged
		 * chips need a 10x faster timer (see tg3_timer).
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		/* timer_counter ticks down to the 1-second work;
		 * asf_counter ticks down to the 2-second heartbeat.
		 */
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		napi_disable(&tp->napi);
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		/* Confirm MSI interrupts are actually delivered; on
		 * failure tg3_test_msi() falls back to INTx itself.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			napi_disable(&tp->napi);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
7466
/* Debug-only dump of chip registers, SRAM control blocks, status and
 * statistics blocks, mailboxes, and NIC-side descriptor rings.
 * Compiled out (#if 0); enabled by hand together with the call site
 * in tg3_close() when diagnosing hardware state.
 */
#if 0
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	/* On-chip send and receive-return ring control blocks. */
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	/* NIC side jumbo RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
7694
7695 static struct net_device_stats *tg3_get_stats(struct net_device *);
7696 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7697
/* net_device stop() method: bring the interface down.
 *
 * Teardown order matters: stop NAPI and any pending reset work first,
 * then the queue and timer, halt the chip under the full lock, release
 * the IRQ/MSI, snapshot counters (the hardware stats area is about to
 * be freed), free DMA memory, and finally power the chip down to D3hot.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	napi_disable(&tp->napi);
	/* Make sure a queued reset_task is not running concurrently. */
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Preserve accumulated counters across the down period so the
	 * next open() reports monotonically increasing statistics.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7741
7742 static inline unsigned long get_stat64(tg3_stat64_t *val)
7743 {
7744         unsigned long ret;
7745
7746 #if (BITS_PER_LONG == 32)
7747         ret = val->low;
7748 #else
7749         ret = ((u64)val->high << 32) | ((u64)val->low);
7750 #endif
7751         return ret;
7752 }
7753
/* Return the cumulative receive CRC error count.
 *
 * On 5700/5701 copper chips the MAC's rx_fcs_errors counter is not
 * used; instead the PHY's CRC counter (read via MII, with the CRC
 * counter enable bit in TEST1 set) is accumulated into
 * tp->phy_crc_errors.  All other chips report the MAC counter.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		/* PHY access must be serialized against the irq path. */
		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			/* Register 0x14 holds the PHY CRC error count;
			 * reading it also clears it on the chip.
			 */
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7779
/* Fold one hardware counter into the running ethtool statistics:
 * new value = snapshot taken at last close + current hardware count.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Build the cumulative ethtool statistics block.
 *
 * Adds the live hardware counters (which reset when the chip is
 * reset) to the snapshot saved in tp->estats_prev at the last close,
 * so values stay monotonic across down/up cycles.  If the hardware
 * stats area is not mapped (device down), returns the snapshot.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7871
/* netdev get_stats callback.  Fold the NIC's DMA'd hardware MIB
 * counters into cumulative net_device_stats totals.  tp->net_stats_prev
 * holds the totals saved before the last chip reset, since a reset
 * clears the hardware counters.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	/* No statistics block mapped yet (device not brought up);
	 * report the last saved totals.
	 */
	if (!hw_stats)
		return old_stats;

	/* The hardware keeps separate unicast/multicast/broadcast
	 * packet counters; sum them for the generic counters.
	 */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC error count needs a helper (hardware quirk handling). */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
7931
7932 static inline u32 calc_crc(unsigned char *buf, int len)
7933 {
7934         u32 reg;
7935         u32 tmp;
7936         int j, k;
7937
7938         reg = 0xffffffff;
7939
7940         for (j = 0; j < len; j++) {
7941                 reg ^= buf[j];
7942
7943                 for (k = 0; k < 8; k++) {
7944                         tmp = reg & 0x01;
7945
7946                         reg >>= 1;
7947
7948                         if (tmp) {
7949                                 reg ^= 0xedb88320;
7950                         }
7951                 }
7952         }
7953
7954         return ~reg;
7955 }
7956
7957 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7958 {
7959         /* accept or reject all multicast frames */
7960         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7961         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7962         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7963         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7964 }
7965
/* Program the chip's receive filters -- promiscuous mode, VLAN tag
 * retention, and the 128-bit multicast hash -- from dev->flags and the
 * device's multicast list.  Caller must hold the full lock.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Low 7 bits of the inverted CRC of the MAC
			 * address index the 128-bit filter: the top 2
			 * of those bits pick the register, the low 5
			 * pick the bit within it.
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch MAC_RX_MODE when it actually changed; the flush
	 * write needs a short settle delay.
	 */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8029
/* netdev set_rx_mode callback: apply the rx filter configuration
 * under the full lock.  A down interface has nothing to program.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
8041
8042 #define TG3_REGDUMP_LEN         (32 * 1024)
8043
/* ethtool get_regs_len: size of the buffer tg3_get_regs() fills. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8048
/* ethtool get_regs: dump 32KB of chip register space into *_p.  The
 * registers are read in documented groups; unread gaps remain zero
 * from the initial memset.  Skipped entirely while the PHY is in low
 * power, when register reads would be bogus.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* The helpers reposition p from orig_p so each value lands at the
 * dump offset equal to its register offset.
 */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers only exist when real NVRAM is attached. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8121
/* ethtool get_eeprom_len: NVRAM size as probed at driver init. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
8128
8129 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8130 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8131
/* ethtool get_eeprom: read eeprom->len bytes of NVRAM starting at
 * eeprom->offset into @data.  The NVRAM interface reads whole 4-byte
 * words, so unaligned head and tail bytes are extracted from a full
 * word read.  On a partial failure eeprom->len reflects how many
 * bytes were successfully copied.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, val, b_offset, b_count;

	/* NVRAM is inaccessible while the PHY is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* NVRAM words are byte-swapped to little endian before
		 * being sliced into the output buffer.
		 */
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			/* Report the bytes copied so far, then fail. */
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8193
8194 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8195
/* ethtool set_eeprom: write eeprom->len bytes at eeprom->offset into
 * NVRAM.  The NVRAM interface writes whole 4-byte words, so unaligned
 * head/tail bytes are handled read-modify-write through a temporary
 * word-aligned buffer.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len, start, end;
	u8 *buf;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	/* Userland must echo back the magic it got from get_eeprom. */
	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		start = cpu_to_le32(start);
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read(tp, offset+len-4, &end);
		if (ret)
			return ret;
		end = cpu_to_le32(end);
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Build an aligned image: preserved head word, user
		 * data, preserved tail word.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	/* Free the bounce buffer only if one was allocated. */
	if (buf != data)
		kfree(buf);

	return ret;
}
8254
8255 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8256 {
8257         struct tg3 *tp = netdev_priv(dev);
8258
8259         cmd->supported = (SUPPORTED_Autoneg);
8260
8261         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8262                 cmd->supported |= (SUPPORTED_1000baseT_Half |
8263                                    SUPPORTED_1000baseT_Full);
8264
8265         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8266                 cmd->supported |= (SUPPORTED_100baseT_Half |
8267                                   SUPPORTED_100baseT_Full |
8268                                   SUPPORTED_10baseT_Half |
8269                                   SUPPORTED_10baseT_Full |
8270                                   SUPPORTED_MII);
8271                 cmd->port = PORT_TP;
8272         } else {
8273                 cmd->supported |= SUPPORTED_FIBRE;
8274                 cmd->port = PORT_FIBRE;
8275         }
8276
8277         cmd->advertising = tp->link_config.advertising;
8278         if (netif_running(dev)) {
8279                 cmd->speed = tp->link_config.active_speed;
8280                 cmd->duplex = tp->link_config.active_duplex;
8281         }
8282         cmd->phy_address = PHY_ADDR;
8283         cmd->transceiver = 0;
8284         cmd->autoneg = tp->link_config.autoneg;
8285         cmd->maxtxpkt = 0;
8286         cmd->maxrxpkt = 0;
8287         return 0;
8288 }
8289
8290 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8291 {
8292         struct tg3 *tp = netdev_priv(dev);
8293
8294         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8295                 /* These are the only valid advertisement bits allowed.  */
8296                 if (cmd->autoneg == AUTONEG_ENABLE &&
8297                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8298                                           ADVERTISED_1000baseT_Full |
8299                                           ADVERTISED_Autoneg |
8300                                           ADVERTISED_FIBRE)))
8301                         return -EINVAL;
8302                 /* Fiber can only do SPEED_1000.  */
8303                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8304                          (cmd->speed != SPEED_1000))
8305                         return -EINVAL;
8306         /* Copper cannot force SPEED_1000.  */
8307         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8308                    (cmd->speed == SPEED_1000))
8309                 return -EINVAL;
8310         else if ((cmd->speed == SPEED_1000) &&
8311                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8312                 return -EINVAL;
8313
8314         tg3_full_lock(tp, 0);
8315
8316         tp->link_config.autoneg = cmd->autoneg;
8317         if (cmd->autoneg == AUTONEG_ENABLE) {
8318                 tp->link_config.advertising = (cmd->advertising |
8319                                               ADVERTISED_Autoneg);
8320                 tp->link_config.speed = SPEED_INVALID;
8321                 tp->link_config.duplex = DUPLEX_INVALID;
8322         } else {
8323                 tp->link_config.advertising = 0;
8324                 tp->link_config.speed = cmd->speed;
8325                 tp->link_config.duplex = cmd->duplex;
8326         }
8327
8328         tp->link_config.orig_speed = tp->link_config.speed;
8329         tp->link_config.orig_duplex = tp->link_config.duplex;
8330         tp->link_config.orig_autoneg = tp->link_config.autoneg;
8331
8332         if (netif_running(dev))
8333                 tg3_setup_phy(tp, 1);
8334
8335         tg3_full_unlock(tp);
8336
8337         return 0;
8338 }
8339
8340 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8341 {
8342         struct tg3 *tp = netdev_priv(dev);
8343
8344         strcpy(info->driver, DRV_MODULE_NAME);
8345         strcpy(info->version, DRV_MODULE_VERSION);
8346         strcpy(info->fw_version, tp->fw_ver);
8347         strcpy(info->bus_info, pci_name(tp->pdev));
8348 }
8349
8350 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8351 {
8352         struct tg3 *tp = netdev_priv(dev);
8353
8354         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8355                 wol->supported = WAKE_MAGIC;
8356         else
8357                 wol->supported = 0;
8358         wol->wolopts = 0;
8359         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8360                 wol->wolopts = WAKE_MAGIC;
8361         memset(&wol->sopass, 0, sizeof(wol->sopass));
8362 }
8363
8364 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8365 {
8366         struct tg3 *tp = netdev_priv(dev);
8367
8368         if (wol->wolopts & ~WAKE_MAGIC)
8369                 return -EINVAL;
8370         if ((wol->wolopts & WAKE_MAGIC) &&
8371             !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8372                 return -EINVAL;
8373
8374         spin_lock_bh(&tp->lock);
8375         if (wol->wolopts & WAKE_MAGIC)
8376                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8377         else
8378                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8379         spin_unlock_bh(&tp->lock);
8380
8381         return 0;
8382 }
8383
8384 static u32 tg3_get_msglevel(struct net_device *dev)
8385 {
8386         struct tg3 *tp = netdev_priv(dev);
8387         return tp->msg_enable;
8388 }
8389
8390 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8391 {
8392         struct tg3 *tp = netdev_priv(dev);
8393         tp->msg_enable = value;
8394 }
8395
8396 static int tg3_set_tso(struct net_device *dev, u32 value)
8397 {
8398         struct tg3 *tp = netdev_priv(dev);
8399
8400         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8401                 if (value)
8402                         return -EINVAL;
8403                 return 0;
8404         }
8405         if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
8406             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
8407                 if (value) {
8408                         dev->features |= NETIF_F_TSO6;
8409                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8410                                 dev->features |= NETIF_F_TSO_ECN;
8411                 } else
8412                         dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
8413         }
8414         return ethtool_op_set_tso(dev, value);
8415 }
8416
/* ethtool nway_reset: restart autonegotiation on the copper PHY.
 * Not applicable to SERDES links.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is deliberately read twice and only the
	 * second read's result is used -- presumably a dummy read to
	 * settle the PHY; confirm against the PHY errata before
	 * removing.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		/* Restart autoneg; force ANENABLE on in case we got
		 * here via parallel detection.
		 */
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
8443
8444 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8445 {
8446         struct tg3 *tp = netdev_priv(dev);
8447
8448         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8449         ering->rx_mini_max_pending = 0;
8450         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8451                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8452         else
8453                 ering->rx_jumbo_max_pending = 0;
8454
8455         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8456
8457         ering->rx_pending = tp->rx_pending;
8458         ering->rx_mini_pending = 0;
8459         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8460                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8461         else
8462                 ering->rx_jumbo_pending = 0;
8463
8464         ering->tx_pending = tp->tx_pending;
8465 }
8466
/* ethtool set_ringparam: resize the rx/tx rings.  Applying a new size
 * requires halting and re-initializing the chip when the interface is
 * running.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	/* Reject sizes beyond the rings, a tx ring too small to hold a
	 * maximally-fragmented skb, or (on TSO-bug chips, which may
	 * linearize/segment in the driver) smaller than three of them.
	 */
	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	/* Quiesce the device before changing ring sizes. */
	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips can only post up to 64 standard rx BDs. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	/* Reset and re-init the hardware with the new ring sizes. */
	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
8506
8507 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8508 {
8509         struct tg3 *tp = netdev_priv(dev);
8510
8511         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8512         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8513         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8514 }
8515
8516 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8517 {
8518         struct tg3 *tp = netdev_priv(dev);
8519         int irq_sync = 0, err = 0;
8520
8521         if (netif_running(dev)) {
8522                 tg3_netif_stop(tp);
8523                 irq_sync = 1;
8524         }
8525
8526         tg3_full_lock(tp, irq_sync);
8527
8528         if (epause->autoneg)
8529                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8530         else
8531                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8532         if (epause->rx_pause)
8533                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8534         else
8535                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8536         if (epause->tx_pause)
8537                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8538         else
8539                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8540
8541         if (netif_running(dev)) {
8542                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8543                 err = tg3_restart_hw(tp, 1);
8544                 if (!err)
8545                         tg3_netif_start(tp);
8546         }
8547
8548         tg3_full_unlock(tp);
8549
8550         return err;
8551 }
8552
8553 static u32 tg3_get_rx_csum(struct net_device *dev)
8554 {
8555         struct tg3 *tp = netdev_priv(dev);
8556         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8557 }
8558
8559 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8560 {
8561         struct tg3 *tp = netdev_priv(dev);
8562
8563         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8564                 if (data != 0)
8565                         return -EINVAL;
8566                 return 0;
8567         }
8568
8569         spin_lock_bh(&tp->lock);
8570         if (data)
8571                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8572         else
8573                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8574         spin_unlock_bh(&tp->lock);
8575
8576         return 0;
8577 }
8578
8579 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8580 {
8581         struct tg3 *tp = netdev_priv(dev);
8582
8583         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8584                 if (data != 0)
8585                         return -EINVAL;
8586                 return 0;
8587         }
8588
8589         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8590             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8591             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8592             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8593                 ethtool_op_set_tx_ipv6_csum(dev, data);
8594         else
8595                 ethtool_op_set_tx_csum(dev, data);
8596
8597         return 0;
8598 }
8599
8600 static int tg3_get_sset_count (struct net_device *dev, int sset)
8601 {
8602         switch (sset) {
8603         case ETH_SS_TEST:
8604                 return TG3_NUM_TEST;
8605         case ETH_SS_STATS:
8606                 return TG3_NUM_STATS;
8607         default:
8608                 return -EOPNOTSUPP;
8609         }
8610 }
8611
8612 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8613 {
8614         switch (stringset) {
8615         case ETH_SS_STATS:
8616                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8617                 break;
8618         case ETH_SS_TEST:
8619                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8620                 break;
8621         default:
8622                 WARN_ON(1);     /* we need a WARN() */
8623                 break;
8624         }
8625 }
8626
8627 static int tg3_phys_id(struct net_device *dev, u32 data)
8628 {
8629         struct tg3 *tp = netdev_priv(dev);
8630         int i;
8631
8632         if (!netif_running(tp->dev))
8633                 return -EAGAIN;
8634
8635         if (data == 0)
8636                 data = 2;
8637
8638         for (i = 0; i < (data * 2); i++) {
8639                 if ((i % 2) == 0)
8640                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8641                                            LED_CTRL_1000MBPS_ON |
8642                                            LED_CTRL_100MBPS_ON |
8643                                            LED_CTRL_10MBPS_ON |
8644                                            LED_CTRL_TRAFFIC_OVERRIDE |
8645                                            LED_CTRL_TRAFFIC_BLINK |
8646                                            LED_CTRL_TRAFFIC_LED);
8647
8648                 else
8649                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8650                                            LED_CTRL_TRAFFIC_OVERRIDE);
8651
8652                 if (msleep_interruptible(500))
8653                         break;
8654         }
8655         tw32(MAC_LED_CTRL, tp->led_ctrl);
8656         return 0;
8657 }
8658
/* ethtool get_ethtool_stats: refresh the accumulated estats from the
 * hardware counters and copy the whole block to the userland buffer.
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
8665
8666 #define NVRAM_TEST_SIZE 0x100
8667 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8668 #define NVRAM_SELFBOOT_HW_SIZE 0x20
8669 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
8670
/* ethtool self-test helper: verify NVRAM contents.  The image format
 * is identified from its magic word; legacy images are CRC-checked,
 * selfboot format-1 images are byte-checksummed, and selfboot HW
 * images carry per-byte parity bits that are verified individually.
 * Returns 0 on success, -EIO/-ENOMEM on failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, k, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* Determine how much of the image the check needs to read. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & 0xe00000) == 0x200000)
			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
		else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the image into buf as little-endian words. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		/* Whole-image byte sum must come out to zero. */
		for (i = 0; i < size; i++)
			csum8 += buf8[i];

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 each hold 7 parity bits. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				/* Byte 16 holds 6 parity bits, byte 17
				 * another 8.
				 */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte plus its parity bit must have even
		 * total parity.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8783
8784 #define TG3_SERDES_TIMEOUT_SEC  2
8785 #define TG3_COPPER_TIMEOUT_SEC  6
8786
8787 static int tg3_test_link(struct tg3 *tp)
8788 {
8789         int i, max;
8790
8791         if (!netif_running(tp->dev))
8792                 return -ENODEV;
8793
8794         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8795                 max = TG3_SERDES_TIMEOUT_SEC;
8796         else
8797                 max = TG3_COPPER_TIMEOUT_SEC;
8798
8799         for (i = 0; i < max; i++) {
8800                 if (netif_carrier_ok(tp->dev))
8801                         return 0;
8802
8803                 if (msleep_interruptible(1000))
8804                         break;
8805         }
8806
8807         return -EIO;
8808 }
8809
8810 /* Only test the commonly used registers */
8811 static int tg3_test_registers(struct tg3 *tp)
8812 {
8813         int i, is_5705, is_5750;
8814         u32 offset, read_mask, write_mask, val, save_val, read_val;
8815         static struct {
8816                 u16 offset;
8817                 u16 flags;
8818 #define TG3_FL_5705     0x1
8819 #define TG3_FL_NOT_5705 0x2
8820 #define TG3_FL_NOT_5788 0x4
8821 #define TG3_FL_NOT_5750 0x8
8822                 u32 read_mask;
8823                 u32 write_mask;
8824         } reg_tbl[] = {
8825                 /* MAC Control Registers */
8826                 { MAC_MODE, TG3_FL_NOT_5705,
8827                         0x00000000, 0x00ef6f8c },
8828                 { MAC_MODE, TG3_FL_5705,
8829                         0x00000000, 0x01ef6b8c },
8830                 { MAC_STATUS, TG3_FL_NOT_5705,
8831                         0x03800107, 0x00000000 },
8832                 { MAC_STATUS, TG3_FL_5705,
8833                         0x03800100, 0x00000000 },
8834                 { MAC_ADDR_0_HIGH, 0x0000,
8835                         0x00000000, 0x0000ffff },
8836                 { MAC_ADDR_0_LOW, 0x0000,
8837                         0x00000000, 0xffffffff },
8838                 { MAC_RX_MTU_SIZE, 0x0000,
8839                         0x00000000, 0x0000ffff },
8840                 { MAC_TX_MODE, 0x0000,
8841                         0x00000000, 0x00000070 },
8842                 { MAC_TX_LENGTHS, 0x0000,
8843                         0x00000000, 0x00003fff },
8844                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8845                         0x00000000, 0x000007fc },
8846                 { MAC_RX_MODE, TG3_FL_5705,
8847                         0x00000000, 0x000007dc },
8848                 { MAC_HASH_REG_0, 0x0000,
8849                         0x00000000, 0xffffffff },
8850                 { MAC_HASH_REG_1, 0x0000,
8851                         0x00000000, 0xffffffff },
8852                 { MAC_HASH_REG_2, 0x0000,
8853                         0x00000000, 0xffffffff },
8854                 { MAC_HASH_REG_3, 0x0000,
8855                         0x00000000, 0xffffffff },
8856
8857                 /* Receive Data and Receive BD Initiator Control Registers. */
8858                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8859                         0x00000000, 0xffffffff },
8860                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8861                         0x00000000, 0xffffffff },
8862                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8863                         0x00000000, 0x00000003 },
8864                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8865                         0x00000000, 0xffffffff },
8866                 { RCVDBDI_STD_BD+0, 0x0000,
8867                         0x00000000, 0xffffffff },
8868                 { RCVDBDI_STD_BD+4, 0x0000,
8869                         0x00000000, 0xffffffff },
8870                 { RCVDBDI_STD_BD+8, 0x0000,
8871                         0x00000000, 0xffff0002 },
8872                 { RCVDBDI_STD_BD+0xc, 0x0000,
8873                         0x00000000, 0xffffffff },
8874
8875                 /* Receive BD Initiator Control Registers. */
8876                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8877                         0x00000000, 0xffffffff },
8878                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8879                         0x00000000, 0x000003ff },
8880                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8881                         0x00000000, 0xffffffff },
8882
8883                 /* Host Coalescing Control Registers. */
8884                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8885                         0x00000000, 0x00000004 },
8886                 { HOSTCC_MODE, TG3_FL_5705,
8887                         0x00000000, 0x000000f6 },
8888                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8889                         0x00000000, 0xffffffff },
8890                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8891                         0x00000000, 0x000003ff },
8892                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8893                         0x00000000, 0xffffffff },
8894                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8895                         0x00000000, 0x000003ff },
8896                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8897                         0x00000000, 0xffffffff },
8898                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8899                         0x00000000, 0x000000ff },
8900                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8901                         0x00000000, 0xffffffff },
8902                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8903                         0x00000000, 0x000000ff },
8904                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8905                         0x00000000, 0xffffffff },
8906                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8907                         0x00000000, 0xffffffff },
8908                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8909                         0x00000000, 0xffffffff },
8910                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8911                         0x00000000, 0x000000ff },
8912                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8913                         0x00000000, 0xffffffff },
8914                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8915                         0x00000000, 0x000000ff },
8916                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8917                         0x00000000, 0xffffffff },
8918                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8919                         0x00000000, 0xffffffff },
8920                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8921                         0x00000000, 0xffffffff },
8922                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8923                         0x00000000, 0xffffffff },
8924                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8925                         0x00000000, 0xffffffff },
8926                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8927                         0xffffffff, 0x00000000 },
8928                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8929                         0xffffffff, 0x00000000 },
8930
8931                 /* Buffer Manager Control Registers. */
8932                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
8933                         0x00000000, 0x007fff80 },
8934                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
8935                         0x00000000, 0x007fffff },
8936                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8937                         0x00000000, 0x0000003f },
8938                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8939                         0x00000000, 0x000001ff },
8940                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8941                         0x00000000, 0x000001ff },
8942                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8943                         0xffffffff, 0x00000000 },
8944                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8945                         0xffffffff, 0x00000000 },
8946
8947                 /* Mailbox Registers */
8948                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8949                         0x00000000, 0x000001ff },
8950                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8951                         0x00000000, 0x000001ff },
8952                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8953                         0x00000000, 0x000007ff },
8954                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8955                         0x00000000, 0x000001ff },
8956
8957                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8958         };
8959
8960         is_5705 = is_5750 = 0;
8961         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8962                 is_5705 = 1;
8963                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8964                         is_5750 = 1;
8965         }
8966
8967         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8968                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8969                         continue;
8970
8971                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8972                         continue;
8973
8974                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8975                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8976                         continue;
8977
8978                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
8979                         continue;
8980
8981                 offset = (u32) reg_tbl[i].offset;
8982                 read_mask = reg_tbl[i].read_mask;
8983                 write_mask = reg_tbl[i].write_mask;
8984
8985                 /* Save the original register content */
8986                 save_val = tr32(offset);
8987
8988                 /* Determine the read-only value. */
8989                 read_val = save_val & read_mask;
8990
8991                 /* Write zero to the register, then make sure the read-only bits
8992                  * are not changed and the read/write bits are all zeros.
8993                  */
8994                 tw32(offset, 0);
8995
8996                 val = tr32(offset);
8997
8998                 /* Test the read-only and read/write bits. */
8999                 if (((val & read_mask) != read_val) || (val & write_mask))
9000                         goto out;
9001
9002                 /* Write ones to all the bits defined by RdMask and WrMask, then
9003                  * make sure the read-only bits are not changed and the
9004                  * read/write bits are all ones.
9005                  */
9006                 tw32(offset, read_mask | write_mask);
9007
9008                 val = tr32(offset);
9009
9010                 /* Test the read-only bits. */
9011                 if ((val & read_mask) != read_val)
9012                         goto out;
9013
9014                 /* Test the read/write bits. */
9015                 if ((val & write_mask) != write_mask)
9016                         goto out;
9017
9018                 tw32(offset, save_val);
9019         }
9020
9021         return 0;
9022
9023 out:
9024         if (netif_msg_hw(tp))
9025                 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9026                        offset);
9027         tw32(offset, save_val);
9028         return -EIO;
9029 }
9030
9031 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9032 {
9033         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9034         int i;
9035         u32 j;
9036
9037         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9038                 for (j = 0; j < len; j += 4) {
9039                         u32 val;
9040
9041                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9042                         tg3_read_mem(tp, offset + j, &val);
9043                         if (val != test_pattern[i])
9044                                 return -EIO;
9045                 }
9046         }
9047         return 0;
9048 }
9049
9050 static int tg3_test_memory(struct tg3 *tp)
9051 {
9052         static struct mem_entry {
9053                 u32 offset;
9054                 u32 len;
9055         } mem_tbl_570x[] = {
9056                 { 0x00000000, 0x00b50},
9057                 { 0x00002000, 0x1c000},
9058                 { 0xffffffff, 0x00000}
9059         }, mem_tbl_5705[] = {
9060                 { 0x00000100, 0x0000c},
9061                 { 0x00000200, 0x00008},
9062                 { 0x00004000, 0x00800},
9063                 { 0x00006000, 0x01000},
9064                 { 0x00008000, 0x02000},
9065                 { 0x00010000, 0x0e000},
9066                 { 0xffffffff, 0x00000}
9067         }, mem_tbl_5755[] = {
9068                 { 0x00000200, 0x00008},
9069                 { 0x00004000, 0x00800},
9070                 { 0x00006000, 0x00800},
9071                 { 0x00008000, 0x02000},
9072                 { 0x00010000, 0x0c000},
9073                 { 0xffffffff, 0x00000}
9074         }, mem_tbl_5906[] = {
9075                 { 0x00000200, 0x00008},
9076                 { 0x00004000, 0x00400},
9077                 { 0x00006000, 0x00400},
9078                 { 0x00008000, 0x01000},
9079                 { 0x00010000, 0x01000},
9080                 { 0xffffffff, 0x00000}
9081         };
9082         struct mem_entry *mem_tbl;
9083         int err = 0;
9084         int i;
9085
9086         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9087                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9088                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9089                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9090                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9091                         mem_tbl = mem_tbl_5755;
9092                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9093                         mem_tbl = mem_tbl_5906;
9094                 else
9095                         mem_tbl = mem_tbl_5705;
9096         } else
9097                 mem_tbl = mem_tbl_570x;
9098
9099         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9100                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9101                     mem_tbl[i].len)) != 0)
9102                         break;
9103         }
9104
9105         return err;
9106 }
9107
9108 #define TG3_MAC_LOOPBACK        0
9109 #define TG3_PHY_LOOPBACK        1
9110
/* Transmit one self-addressed test frame with the MAC or PHY placed in
 * internal loopback, then verify that the frame comes back on the standard
 * receive ring with an intact payload.
 *
 * @tp: device state
 * @loopback_mode: TG3_MAC_LOOPBACK or TG3_PHY_LOOPBACK
 *
 * Returns 0 when the looped-back frame is received and matches (or the
 * test is skipped for the 5780 MAC-loopback erratum), -EINVAL for an
 * unknown mode, -ENOMEM if the test skb cannot be allocated, and -EIO on
 * any transmit/receive/compare failure.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		/* Select internal MAC loopback with the correct port mode. */
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				/* Clear bit 0x20 in shadow register 0x1b —
				 * presumably a power-saving feature that
				 * interferes with loopback; TODO confirm
				 * against the 5906 PHY data sheet.
				 */
				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		/* Put the PHY itself into loopback mode. */
		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			/* Link-polarity handling differs per PHY on 5700. */
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build a 1514-byte frame addressed to ourselves: 6-byte dest MAC,
	 * 8 zero bytes, then a counting-byte payload from offset 14.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	/* Remember the rx producer index so we can detect our own frame. */
	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	/* Ring the tx doorbell, then read it back to flush the write. */
	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* Fail if the frame was not fully transmitted and received. */
	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the receive descriptor: must be on the standard ring,
	 * free of receive errors, and the expected length.
	 */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	/* Subtract 4 for the trailing FCS before comparing lengths. */
	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Verify the counting-byte payload survived the round trip. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
9278
9279 #define TG3_MAC_LOOPBACK_FAILED         1
9280 #define TG3_PHY_LOOPBACK_FAILED         2
9281 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
9282                                          TG3_PHY_LOOPBACK_FAILED)
9283
/* Run the MAC internal loopback test and, on non-SerDes devices, the PHY
 * loopback test as well.
 *
 * Returns 0 on success, or a bitmask of TG3_MAC_LOOPBACK_FAILED and
 * TG3_PHY_LOOPBACK_FAILED (TG3_LOOPBACK_FAILED when the test could not
 * run at all).
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	/* Reinitialize the hardware so the test starts from a known state. */
	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;

	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
		int i;
		u32 status;

		/* Request the CPMU hardware mutex before changing CPMU_CTRL. */
		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER)
			return TG3_LOOPBACK_FAILED;

		/* Save CPMU_CTRL so it can be restored after the test. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);

		/* Turn off power management based on link speed. */
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~CPMU_CTRL_LINK_SPEED_MODE);
	}

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;

	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
		/* Restore the saved CPMU power-management setting. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	/* PHY loopback is only attempted on non-SerDes devices. */
	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	return err;
}
9337
/* ethtool self-test entry point.
 *
 * Always runs the online tests (NVRAM, link); when ETH_TEST_FL_OFFLINE is
 * requested, additionally halts the device and runs the register, memory,
 * loopback, and interrupt tests, then restarts the hardware.
 *
 * Results land in data[0..5] (NVRAM, link, registers, memory, loopback,
 * interrupt): nonzero means that test failed, and ETH_TEST_FL_FAILED is
 * set in etest->flags on any failure.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Wake the chip out of low-power state before touching it. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip: halt it and stop the on-chip CPUs
		 * before poking registers and memory directly.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* data[4] carries the loopback failure bitmask directly. */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Interrupt test runs with the full lock released. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Bring the hardware back up for normal operation. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	/* Return to low-power state if that is where we started. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
9410
9411 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9412 {
9413         struct mii_ioctl_data *data = if_mii(ifr);
9414         struct tg3 *tp = netdev_priv(dev);
9415         int err;
9416
9417         switch(cmd) {
9418         case SIOCGMIIPHY:
9419                 data->phy_id = PHY_ADDR;
9420
9421                 /* fallthru */
9422         case SIOCGMIIREG: {
9423                 u32 mii_regval;
9424
9425                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9426                         break;                  /* We have no PHY */
9427
9428                 if (tp->link_config.phy_is_low_power)
9429                         return -EAGAIN;
9430
9431                 spin_lock_bh(&tp->lock);
9432                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9433                 spin_unlock_bh(&tp->lock);
9434
9435                 data->val_out = mii_regval;
9436
9437                 return err;
9438         }
9439
9440         case SIOCSMIIREG:
9441                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9442                         break;                  /* We have no PHY */
9443
9444                 if (!capable(CAP_NET_ADMIN))
9445                         return -EPERM;
9446
9447                 if (tp->link_config.phy_is_low_power)
9448                         return -EAGAIN;
9449
9450                 spin_lock_bh(&tp->lock);
9451                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9452                 spin_unlock_bh(&tp->lock);
9453
9454                 return err;
9455
9456         default:
9457                 /* do nothing */
9458                 break;
9459         }
9460         return -EOPNOTSUPP;
9461 }
9462
9463 #if TG3_VLAN_TAG_USED
/* VLAN hook: record the new VLAN group pointer and refresh the RX mode.
 * The device is stopped and the full lock held while vlgrp is swapped,
 * then restarted before the lock is released (matching the file's other
 * stop/lock/start sequences).
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	if (netif_running(dev))
		tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
9483 #endif
9484
9485 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9486 {
9487         struct tg3 *tp = netdev_priv(dev);
9488
9489         memcpy(ec, &tp->coal, sizeof(*ec));
9490         return 0;
9491 }
9492
9493 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9494 {
9495         struct tg3 *tp = netdev_priv(dev);
9496         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9497         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9498
9499         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9500                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9501                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9502                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9503                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9504         }
9505
9506         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9507             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9508             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9509             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9510             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9511             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9512             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9513             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9514             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9515             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9516                 return -EINVAL;
9517
9518         /* No rx interrupts will be generated if both are zero */
9519         if ((ec->rx_coalesce_usecs == 0) &&
9520             (ec->rx_max_coalesced_frames == 0))
9521                 return -EINVAL;
9522
9523         /* No tx interrupts will be generated if both are zero */
9524         if ((ec->tx_coalesce_usecs == 0) &&
9525             (ec->tx_max_coalesced_frames == 0))
9526                 return -EINVAL;
9527
9528         /* Only copy relevant parameters, ignore all others. */
9529         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9530         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9531         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9532         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9533         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9534         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9535         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9536         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9537         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9538
9539         if (netif_running(dev)) {
9540                 tg3_full_lock(tp, 0);
9541                 __tg3_set_coalesce(tp, &tp->coal);
9542                 tg3_full_unlock(tp);
9543         }
9544         return 0;
9545 }
9546
/* ethtool operations supported by this driver, installed via
 * dev->ethtool_ops at probe time. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= tg3_set_tso,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
9579
9580 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9581 {
9582         u32 cursize, val, magic;
9583
9584         tp->nvram_size = EEPROM_CHIP_SIZE;
9585
9586         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9587                 return;
9588
9589         if ((magic != TG3_EEPROM_MAGIC) &&
9590             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9591             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9592                 return;
9593
9594         /*
9595          * Size the chip by reading offsets at increasing powers of two.
9596          * When we encounter our validation signature, we know the addressing
9597          * has wrapped around, and thus have our chip size.
9598          */
9599         cursize = 0x10;
9600
9601         while (cursize < tp->nvram_size) {
9602                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9603                         return;
9604
9605                 if (val == magic)
9606                         break;
9607
9608                 cursize <<= 1;
9609         }
9610
9611         tp->nvram_size = cursize;
9612 }
9613
9614 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9615 {
9616         u32 val;
9617
9618         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9619                 return;
9620
9621         /* Selfboot format */
9622         if (val != TG3_EEPROM_MAGIC) {
9623                 tg3_get_eeprom_size(tp);
9624                 return;
9625         }
9626
9627         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9628                 if (val != 0) {
9629                         tp->nvram_size = (val >> 16) * 1024;
9630                         return;
9631                 }
9632         }
9633         tp->nvram_size = 0x80000;
9634 }
9635
9636 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9637 {
9638         u32 nvcfg1;
9639
9640         nvcfg1 = tr32(NVRAM_CFG1);
9641         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9642                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9643         }
9644         else {
9645                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9646                 tw32(NVRAM_CFG1, nvcfg1);
9647         }
9648
9649         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9650             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9651                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9652                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9653                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9654                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9655                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9656                                 break;
9657                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9658                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9659                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9660                                 break;
9661                         case FLASH_VENDOR_ATMEL_EEPROM:
9662                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9663                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9664                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9665                                 break;
9666                         case FLASH_VENDOR_ST:
9667                                 tp->nvram_jedecnum = JEDEC_ST;
9668                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9669                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9670                                 break;
9671                         case FLASH_VENDOR_SAIFUN:
9672                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
9673                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9674                                 break;
9675                         case FLASH_VENDOR_SST_SMALL:
9676                         case FLASH_VENDOR_SST_LARGE:
9677                                 tp->nvram_jedecnum = JEDEC_SST;
9678                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9679                                 break;
9680                 }
9681         }
9682         else {
9683                 tp->nvram_jedecnum = JEDEC_ATMEL;
9684                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9685                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9686         }
9687 }
9688
9689 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9690 {
9691         u32 nvcfg1;
9692
9693         nvcfg1 = tr32(NVRAM_CFG1);
9694
9695         /* NVRAM protection for TPM */
9696         if (nvcfg1 & (1 << 27))
9697                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9698
9699         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9700                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9701                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9702                         tp->nvram_jedecnum = JEDEC_ATMEL;
9703                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9704                         break;
9705                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9706                         tp->nvram_jedecnum = JEDEC_ATMEL;
9707                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9708                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9709                         break;
9710                 case FLASH_5752VENDOR_ST_M45PE10:
9711                 case FLASH_5752VENDOR_ST_M45PE20:
9712                 case FLASH_5752VENDOR_ST_M45PE40:
9713                         tp->nvram_jedecnum = JEDEC_ST;
9714                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9715                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9716                         break;
9717         }
9718
9719         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9720                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9721                         case FLASH_5752PAGE_SIZE_256:
9722                                 tp->nvram_pagesize = 256;
9723                                 break;
9724                         case FLASH_5752PAGE_SIZE_512:
9725                                 tp->nvram_pagesize = 512;
9726                                 break;
9727                         case FLASH_5752PAGE_SIZE_1K:
9728                                 tp->nvram_pagesize = 1024;
9729                                 break;
9730                         case FLASH_5752PAGE_SIZE_2K:
9731                                 tp->nvram_pagesize = 2048;
9732                                 break;
9733                         case FLASH_5752PAGE_SIZE_4K:
9734                                 tp->nvram_pagesize = 4096;
9735                                 break;
9736                         case FLASH_5752PAGE_SIZE_264:
9737                                 tp->nvram_pagesize = 264;
9738                                 break;
9739                 }
9740         }
9741         else {
9742                 /* For eeprom, set pagesize to maximum eeprom size */
9743                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9744
9745                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9746                 tw32(NVRAM_CFG1, nvcfg1);
9747         }
9748 }
9749
9750 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9751 {
9752         u32 nvcfg1, protect = 0;
9753
9754         nvcfg1 = tr32(NVRAM_CFG1);
9755
9756         /* NVRAM protection for TPM */
9757         if (nvcfg1 & (1 << 27)) {
9758                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9759                 protect = 1;
9760         }
9761
9762         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9763         switch (nvcfg1) {
9764                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9765                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9766                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9767                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
9768                         tp->nvram_jedecnum = JEDEC_ATMEL;
9769                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9770                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9771                         tp->nvram_pagesize = 264;
9772                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
9773                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
9774                                 tp->nvram_size = (protect ? 0x3e200 : 0x80000);
9775                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
9776                                 tp->nvram_size = (protect ? 0x1f200 : 0x40000);
9777                         else
9778                                 tp->nvram_size = (protect ? 0x1f200 : 0x20000);
9779                         break;
9780                 case FLASH_5752VENDOR_ST_M45PE10:
9781                 case FLASH_5752VENDOR_ST_M45PE20:
9782                 case FLASH_5752VENDOR_ST_M45PE40:
9783                         tp->nvram_jedecnum = JEDEC_ST;
9784                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9785                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9786                         tp->nvram_pagesize = 256;
9787                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
9788                                 tp->nvram_size = (protect ? 0x10000 : 0x20000);
9789                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
9790                                 tp->nvram_size = (protect ? 0x10000 : 0x40000);
9791                         else
9792                                 tp->nvram_size = (protect ? 0x20000 : 0x80000);
9793                         break;
9794         }
9795 }
9796
9797 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9798 {
9799         u32 nvcfg1;
9800
9801         nvcfg1 = tr32(NVRAM_CFG1);
9802
9803         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9804                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9805                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9806                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9807                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9808                         tp->nvram_jedecnum = JEDEC_ATMEL;
9809                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9810                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9811
9812                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9813                         tw32(NVRAM_CFG1, nvcfg1);
9814                         break;
9815                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9816                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9817                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9818                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9819                         tp->nvram_jedecnum = JEDEC_ATMEL;
9820                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9821                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9822                         tp->nvram_pagesize = 264;
9823                         break;
9824                 case FLASH_5752VENDOR_ST_M45PE10:
9825                 case FLASH_5752VENDOR_ST_M45PE20:
9826                 case FLASH_5752VENDOR_ST_M45PE40:
9827                         tp->nvram_jedecnum = JEDEC_ST;
9828                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9829                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9830                         tp->nvram_pagesize = 256;
9831                         break;
9832         }
9833 }
9834
9835 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
9836 {
9837         u32 nvcfg1, protect = 0;
9838
9839         nvcfg1 = tr32(NVRAM_CFG1);
9840
9841         /* NVRAM protection for TPM */
9842         if (nvcfg1 & (1 << 27)) {
9843                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9844                 protect = 1;
9845         }
9846
9847         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9848         switch (nvcfg1) {
9849                 case FLASH_5761VENDOR_ATMEL_ADB021D:
9850                 case FLASH_5761VENDOR_ATMEL_ADB041D:
9851                 case FLASH_5761VENDOR_ATMEL_ADB081D:
9852                 case FLASH_5761VENDOR_ATMEL_ADB161D:
9853                 case FLASH_5761VENDOR_ATMEL_MDB021D:
9854                 case FLASH_5761VENDOR_ATMEL_MDB041D:
9855                 case FLASH_5761VENDOR_ATMEL_MDB081D:
9856                 case FLASH_5761VENDOR_ATMEL_MDB161D:
9857                         tp->nvram_jedecnum = JEDEC_ATMEL;
9858                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9859                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9860                         tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
9861                         tp->nvram_pagesize = 256;
9862                         break;
9863                 case FLASH_5761VENDOR_ST_A_M45PE20:
9864                 case FLASH_5761VENDOR_ST_A_M45PE40:
9865                 case FLASH_5761VENDOR_ST_A_M45PE80:
9866                 case FLASH_5761VENDOR_ST_A_M45PE16:
9867                 case FLASH_5761VENDOR_ST_M_M45PE20:
9868                 case FLASH_5761VENDOR_ST_M_M45PE40:
9869                 case FLASH_5761VENDOR_ST_M_M45PE80:
9870                 case FLASH_5761VENDOR_ST_M_M45PE16:
9871                         tp->nvram_jedecnum = JEDEC_ST;
9872                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9873                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9874                         tp->nvram_pagesize = 256;
9875                         break;
9876         }
9877
9878         if (protect) {
9879                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
9880         } else {
9881                 switch (nvcfg1) {
9882                         case FLASH_5761VENDOR_ATMEL_ADB161D:
9883                         case FLASH_5761VENDOR_ATMEL_MDB161D:
9884                         case FLASH_5761VENDOR_ST_A_M45PE16:
9885                         case FLASH_5761VENDOR_ST_M_M45PE16:
9886                                 tp->nvram_size = 0x100000;
9887                                 break;
9888                         case FLASH_5761VENDOR_ATMEL_ADB081D:
9889                         case FLASH_5761VENDOR_ATMEL_MDB081D:
9890                         case FLASH_5761VENDOR_ST_A_M45PE80:
9891                         case FLASH_5761VENDOR_ST_M_M45PE80:
9892                                 tp->nvram_size = 0x80000;
9893                                 break;
9894                         case FLASH_5761VENDOR_ATMEL_ADB041D:
9895                         case FLASH_5761VENDOR_ATMEL_MDB041D:
9896                         case FLASH_5761VENDOR_ST_A_M45PE40:
9897                         case FLASH_5761VENDOR_ST_M_M45PE40:
9898                                 tp->nvram_size = 0x40000;
9899                                 break;
9900                         case FLASH_5761VENDOR_ATMEL_ADB021D:
9901                         case FLASH_5761VENDOR_ATMEL_MDB021D:
9902                         case FLASH_5761VENDOR_ST_A_M45PE20:
9903                         case FLASH_5761VENDOR_ST_M_M45PE20:
9904                                 tp->nvram_size = 0x20000;
9905                                 break;
9906                 }
9907         }
9908 }
9909
9910 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
9911 {
9912         tp->nvram_jedecnum = JEDEC_ATMEL;
9913         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9914         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9915 }
9916
9917 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9918 static void __devinit tg3_nvram_init(struct tg3 *tp)
9919 {
9920         tw32_f(GRC_EEPROM_ADDR,
9921              (EEPROM_ADDR_FSM_RESET |
9922               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9923                EEPROM_ADDR_CLKPERD_SHIFT)));
9924
9925         msleep(1);
9926
9927         /* Enable seeprom accesses. */
9928         tw32_f(GRC_LOCAL_CTRL,
9929              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9930         udelay(100);
9931
9932         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9933             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9934                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9935
9936                 if (tg3_nvram_lock(tp)) {
9937                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9938                                "tg3_nvram_init failed.\n", tp->dev->name);
9939                         return;
9940                 }
9941                 tg3_enable_nvram_access(tp);
9942
9943                 tp->nvram_size = 0;
9944
9945                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9946                         tg3_get_5752_nvram_info(tp);
9947                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9948                         tg3_get_5755_nvram_info(tp);
9949                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9950                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
9951                         tg3_get_5787_nvram_info(tp);
9952                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9953                         tg3_get_5761_nvram_info(tp);
9954                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9955                         tg3_get_5906_nvram_info(tp);
9956                 else
9957                         tg3_get_nvram_info(tp);
9958
9959                 if (tp->nvram_size == 0)
9960                         tg3_get_nvram_size(tp);
9961
9962                 tg3_disable_nvram_access(tp);
9963                 tg3_nvram_unlock(tp);
9964
9965         } else {
9966                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9967
9968                 tg3_get_eeprom_size(tp);
9969         }
9970 }
9971
9972 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9973                                         u32 offset, u32 *val)
9974 {
9975         u32 tmp;
9976         int i;
9977
9978         if (offset > EEPROM_ADDR_ADDR_MASK ||
9979             (offset % 4) != 0)
9980                 return -EINVAL;
9981
9982         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9983                                         EEPROM_ADDR_DEVID_MASK |
9984                                         EEPROM_ADDR_READ);
9985         tw32(GRC_EEPROM_ADDR,
9986              tmp |
9987              (0 << EEPROM_ADDR_DEVID_SHIFT) |
9988              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9989               EEPROM_ADDR_ADDR_MASK) |
9990              EEPROM_ADDR_READ | EEPROM_ADDR_START);
9991
9992         for (i = 0; i < 1000; i++) {
9993                 tmp = tr32(GRC_EEPROM_ADDR);
9994
9995                 if (tmp & EEPROM_ADDR_COMPLETE)
9996                         break;
9997                 msleep(1);
9998         }
9999         if (!(tmp & EEPROM_ADDR_COMPLETE))
10000                 return -EBUSY;
10001
10002         *val = tr32(GRC_EEPROM_DATA);
10003         return 0;
10004 }
10005
10006 #define NVRAM_CMD_TIMEOUT 10000
10007
10008 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10009 {
10010         int i;
10011
10012         tw32(NVRAM_CMD, nvram_cmd);
10013         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10014                 udelay(10);
10015                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10016                         udelay(10);
10017                         break;
10018                 }
10019         }
10020         if (i == NVRAM_CMD_TIMEOUT) {
10021                 return -EBUSY;
10022         }
10023         return 0;
10024 }
10025
10026 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10027 {
10028         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10029             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10030             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10031            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10032             (tp->nvram_jedecnum == JEDEC_ATMEL))
10033
10034                 addr = ((addr / tp->nvram_pagesize) <<
10035                         ATMEL_AT45DB0X1B_PAGE_POS) +
10036                        (addr % tp->nvram_pagesize);
10037
10038         return addr;
10039 }
10040
10041 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10042 {
10043         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10044             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10045             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10046            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10047             (tp->nvram_jedecnum == JEDEC_ATMEL))
10048
10049                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10050                         tp->nvram_pagesize) +
10051                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10052
10053         return addr;
10054 }
10055
/* Read one 32-bit word from NVRAM at byte offset @offset into @val.
 * Falls back to the GRC EEPROM interface on parts without NVRAM
 * support.  Returns 0 on success or a negative errno; @val is only
 * written on success.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate to page-based addressing where the part needs it. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	/* swab32: RDDATA is byte-swapped relative to the order callers
	 * expect (tg3_nvram_read_swab() swabs it back). */
	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	/* Always undo access enable and release the lock, even on error. */
	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
10087
10088 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10089 {
10090         int err;
10091         u32 tmp;
10092
10093         err = tg3_nvram_read(tp, offset, &tmp);
10094         *val = swab32(tmp);
10095         return err;
10096 }
10097
/* Write @len bytes from @buf to the serial EEPROM starting at @offset,
 * one dword at a time through the GRC EEPROM state machine.  @offset
 * and @len are dword aligned (see callers).  Returns 0 on success or
 * -EBUSY if a write fails to complete within ~1s.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr, data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

		/* NOTE(review): writing EEPROM_ADDR_COMPLETE back
		 * presumably acknowledges/clears the prior completion
		 * status (write-1-to-clear) — confirm against GRC docs. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		/* Clear address/devid/read fields, then start the write
		 * of this dword at 'addr' on device 0. */
		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll up to 1000 times, 1 ms apart, for completion. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
10139
/* offset and length are dword aligned.
 *
 * Write to unbuffered flash with a per-page read/modify/erase/write
 * cycle: read the whole target page, merge the new data, erase the
 * page, then write it back a dword at a time.  Returns 0 on success
 * or a negative errno; the part is left write-disabled either way.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Scratch buffer holding one full flash page for the RMW cycle. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start address of the page containing 'offset'. */
		phy_addr = offset & ~pagemask;

		/* Read the entire target page into the scratch buffer. */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read(tp, phy_addr + j,
						(u32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		/* Merge the caller's data over the affected span. */
		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* NOTE(review): 'buf' is not advanced between loop
		 * iterations, so a write spanning multiple pages appears
		 * to re-copy the same source bytes into each page —
		 * verify against callers/upstream before relying on
		 * multi-page unbuffered writes. */
		memcpy(tmp + page_off, buf, size);

		/* Advance to the start of the next page. */
		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Write the merged page back, one dword at a time,
		 * flagging the first and last words of the page. */
		for (j = 0; j < pagesize; j += 4) {
			u32 data;

			data = *((u32 *) (tmp + j));
			tw32(NVRAM_WRDATA, cpu_to_be32(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Disable further writes regardless of how the loop ended. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
10235
/* offset and length are dword aligned.
 *
 * Write to buffered flash (or EEPROM) one dword per command, marking
 * page boundaries with NVRAM_CMD_FIRST/LAST so the controller handles
 * the page buffering.  Returns 0 on success or a negative errno from
 * the first failed command.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		page_off = offset % tp->nvram_pagesize;

		/* Translate to the part's page-based physical address. */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST at a page boundary or the very first word;
		 * LAST at a page end or the final word of the block. */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST parts on chips outside the listed ASIC revisions
		 * need an explicit write-enable command before each
		 * FIRST-flagged write. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
10288
10289 /* offset and length are dword aligned */
10290 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10291 {
10292         int ret;
10293
10294         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10295                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10296                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
10297                 udelay(40);
10298         }
10299
10300         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10301                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10302         }
10303         else {
10304                 u32 grc_mode;
10305
10306                 ret = tg3_nvram_lock(tp);
10307                 if (ret)
10308                         return ret;
10309
10310                 tg3_enable_nvram_access(tp);
10311                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10312                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
10313                         tw32(NVRAM_WRITE1, 0x406);
10314
10315                 grc_mode = tr32(GRC_MODE);
10316                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10317
10318                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10319                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10320
10321                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
10322                                 buf);
10323                 }
10324                 else {
10325                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10326                                 buf);
10327                 }
10328
10329                 grc_mode = tr32(GRC_MODE);
10330                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10331
10332                 tg3_disable_nvram_access(tp);
10333                 tg3_nvram_unlock(tp);
10334         }
10335
10336         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10337                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10338                 udelay(40);
10339         }
10340
10341         return ret;
10342 }
10343
/* Maps a PCI subsystem vendor/device pair to the PHY chip known to be
 * mounted on that board; phy_id == 0 means a serdes (no copper PHY).
 * Used as a fallback when the PHY id cannot be read from the hardware
 * or the EEPROM (see tg3_phy_probe()).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
10386
10387 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10388 {
10389         int i;
10390
10391         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10392                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10393                      tp->pdev->subsystem_vendor) &&
10394                     (subsys_id_to_phy_id[i].subsys_devid ==
10395                      tp->pdev->subsystem_device))
10396                         return &subsys_id_to_phy_id[i];
10397         }
10398         return NULL;
10399 }
10400
10401 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10402 {
10403         u32 val;
10404         u16 pmcsr;
10405
10406         /* On some early chips the SRAM cannot be accessed in D3hot state,
10407          * so need make sure we're in D0.
10408          */
10409         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10410         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10411         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10412         msleep(1);
10413
10414         /* Make sure register accesses (indirect or otherwise)
10415          * will function correctly.
10416          */
10417         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10418                                tp->misc_host_ctrl);
10419
10420         /* The memory arbiter has to be enabled in order for SRAM accesses
10421          * to succeed.  Normally on powerup the tg3 chip firmware will make
10422          * sure it is enabled, but other entities such as system netboot
10423          * code might disable it.
10424          */
10425         val = tr32(MEMARB_MODE);
10426         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10427
10428         tp->phy_id = PHY_ID_INVALID;
10429         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10430
10431         /* Assume an onboard device and WOL capable by default.  */
10432         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
10433
10434         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10435                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10436                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10437                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10438                 }
10439                 val = tr32(VCPU_CFGSHDW);
10440                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
10441                         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10442                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10443                     (val & VCPU_CFGSHDW_WOL_MAGPKT))
10444                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10445                 return;
10446         }
10447
10448         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10449         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10450                 u32 nic_cfg, led_cfg;
10451                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10452                 int eeprom_phy_serdes = 0;
10453
10454                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10455                 tp->nic_sram_data_cfg = nic_cfg;
10456
10457                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10458                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10459                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10460                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10461                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10462                     (ver > 0) && (ver < 0x100))
10463                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10464
10465                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10466                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10467                         eeprom_phy_serdes = 1;
10468
10469                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10470                 if (nic_phy_id != 0) {
10471                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10472                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10473
10474                         eeprom_phy_id  = (id1 >> 16) << 10;
10475                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
10476                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
10477                 } else
10478                         eeprom_phy_id = 0;
10479
10480                 tp->phy_id = eeprom_phy_id;
10481                 if (eeprom_phy_serdes) {
10482                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10483                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10484                         else
10485                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10486                 }
10487
10488                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10489                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10490                                     SHASTA_EXT_LED_MODE_MASK);
10491                 else
10492                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10493
10494                 switch (led_cfg) {
10495                 default:
10496                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10497                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10498                         break;
10499
10500                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10501                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10502                         break;
10503
10504                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10505                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10506
10507                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10508                          * read on some older 5700/5701 bootcode.
10509                          */
10510                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10511                             ASIC_REV_5700 ||
10512                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
10513                             ASIC_REV_5701)
10514                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10515
10516                         break;
10517
10518                 case SHASTA_EXT_LED_SHARED:
10519                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
10520                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10521                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10522                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10523                                                  LED_CTRL_MODE_PHY_2);
10524                         break;
10525
10526                 case SHASTA_EXT_LED_MAC:
10527                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10528                         break;
10529
10530                 case SHASTA_EXT_LED_COMBO:
10531                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
10532                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10533                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10534                                                  LED_CTRL_MODE_PHY_2);
10535                         break;
10536
10537                 };
10538
10539                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10540                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10541                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10542                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10543
10544                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
10545                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
10546                         if ((tp->pdev->subsystem_vendor ==
10547                              PCI_VENDOR_ID_ARIMA) &&
10548                             (tp->pdev->subsystem_device == 0x205a ||
10549                              tp->pdev->subsystem_device == 0x2063))
10550                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10551                 } else {
10552                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10553                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10554                 }
10555
10556                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10557                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10558                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10559                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10560                 }
10561                 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
10562                         tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
10563                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10564                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10565                         tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
10566
10567                 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
10568                     nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
10569                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10570
10571                 if (cfg2 & (1 << 17))
10572                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10573
10574                 /* serdes signal pre-emphasis in register 0x590 set by */
10575                 /* bootcode if bit 18 is set */
10576                 if (cfg2 & (1 << 18))
10577                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10578
10579                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10580                         u32 cfg3;
10581
10582                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10583                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10584                                 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10585                 }
10586         }
10587 }
10588
10589 static int __devinit tg3_phy_probe(struct tg3 *tp)
10590 {
10591         u32 hw_phy_id_1, hw_phy_id_2;
10592         u32 hw_phy_id, hw_phy_id_masked;
10593         int err;
10594
10595         /* Reading the PHY ID register can conflict with ASF
10596          * firwmare access to the PHY hardware.
10597          */
10598         err = 0;
10599         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
10600             (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
10601                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
10602         } else {
10603                 /* Now read the physical PHY_ID from the chip and verify
10604                  * that it is sane.  If it doesn't look good, we fall back
10605                  * to either the hard-coded table based PHY_ID and failing
10606                  * that the value found in the eeprom area.
10607                  */
10608                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
10609                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
10610
10611                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
10612                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
10613                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
10614
10615                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
10616         }
10617
10618         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
10619                 tp->phy_id = hw_phy_id;
10620                 if (hw_phy_id_masked == PHY_ID_BCM8002)
10621                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10622                 else
10623                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
10624         } else {
10625                 if (tp->phy_id != PHY_ID_INVALID) {
10626                         /* Do nothing, phy ID already set up in
10627                          * tg3_get_eeprom_hw_cfg().
10628                          */
10629                 } else {
10630                         struct subsys_tbl_ent *p;
10631
10632                         /* No eeprom signature?  Try the hardcoded
10633                          * subsys device table.
10634                          */
10635                         p = lookup_by_subsys(tp);
10636                         if (!p)
10637                                 return -ENODEV;
10638
10639                         tp->phy_id = p->phy_id;
10640                         if (!tp->phy_id ||
10641                             tp->phy_id == PHY_ID_BCM8002)
10642                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10643                 }
10644         }
10645
10646         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
10647             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
10648             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
10649                 u32 bmsr, adv_reg, tg3_ctrl, mask;
10650
10651                 tg3_readphy(tp, MII_BMSR, &bmsr);
10652                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
10653                     (bmsr & BMSR_LSTATUS))
10654                         goto skip_phy_reset;
10655
10656                 err = tg3_phy_reset(tp);
10657                 if (err)
10658                         return err;
10659
10660                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
10661                            ADVERTISE_100HALF | ADVERTISE_100FULL |
10662                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
10663                 tg3_ctrl = 0;
10664                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
10665                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
10666                                     MII_TG3_CTRL_ADV_1000_FULL);
10667                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10668                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
10669                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
10670                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
10671                 }
10672
10673                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10674                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10675                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
10676                 if (!tg3_copper_is_advertising_all(tp, mask)) {
10677                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
10678
10679                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
10680                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
10681
10682                         tg3_writephy(tp, MII_BMCR,
10683                                      BMCR_ANENABLE | BMCR_ANRESTART);
10684                 }
10685                 tg3_phy_set_wirespeed(tp);
10686
10687                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
10688                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
10689                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
10690         }
10691
10692 skip_phy_reset:
10693         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
10694                 err = tg3_init_5401phy_dsp(tp);
10695                 if (err)
10696                         return err;
10697         }
10698
10699         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
10700                 err = tg3_init_5401phy_dsp(tp);
10701         }
10702
10703         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
10704                 tp->link_config.advertising =
10705                         (ADVERTISED_1000baseT_Half |
10706                          ADVERTISED_1000baseT_Full |
10707                          ADVERTISED_Autoneg |
10708                          ADVERTISED_FIBRE);
10709         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10710                 tp->link_config.advertising &=
10711                         ~(ADVERTISED_1000baseT_Half |
10712                           ADVERTISED_1000baseT_Full);
10713
10714         return err;
10715 }
10716
/* Extract the board part number from the VPD (vital product data) area
 * and store it in tp->board_part_number.  The VPD image is read either
 * directly from NVRAM (offset 0x100, when the NVRAM signature is valid)
 * or through the PCI VPD capability, then parsed for the read-only
 * resource's "PN" keyword.  Falls back to a fixed string on any failure.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	unsigned int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Valid NVRAM signature: VPD lives at fixed offset 0x100. */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			/* Unpack the dword into bytes, LSB first. */
			vpd_data[i + 0] = ((tmp >>  0) & 0xff);
			vpd_data[i + 1] = ((tmp >>  8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		int vpd_cap;

		/* No NVRAM signature: fetch VPD via the PCI capability. */
		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			u16 tmp16;

			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			/* Poll up to ~100ms for the completion flag
			 * (bit 15 of PCI_VPD_ADDR) to be set.
			 */
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			tmp = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &tmp, 4);
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		/* 0x82 = identifier string, 0x91 = read-write resource:
		 * skip over the tag header (3 bytes) plus its 16-bit
		 * little-endian length.
		 */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		/* Only a read-only resource (0x90) can hold "PN". */
		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		/* Walk the keyword list inside the read-only block;
		 * each entry is 2 key bytes + 1 length byte + data.
		 */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				/* Bound by tp->board_part_number size and
				 * the VPD buffer.
				 */
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else
		strcpy(tp->board_part_number, "none");
}
10816
10817 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10818 {
10819         u32 val, offset, start;
10820
10821         if (tg3_nvram_read_swab(tp, 0, &val))
10822                 return;
10823
10824         if (val != TG3_EEPROM_MAGIC)
10825                 return;
10826
10827         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10828             tg3_nvram_read_swab(tp, 0x4, &start))
10829                 return;
10830
10831         offset = tg3_nvram_logical_addr(tp, offset);
10832         if (tg3_nvram_read_swab(tp, offset, &val))
10833                 return;
10834
10835         if ((val & 0xfc000000) == 0x0c000000) {
10836                 u32 ver_offset, addr;
10837                 int i;
10838
10839                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
10840                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10841                         return;
10842
10843                 if (val != 0)
10844                         return;
10845
10846                 addr = offset + ver_offset - start;
10847                 for (i = 0; i < 16; i += 4) {
10848                         if (tg3_nvram_read(tp, addr + i, &val))
10849                                 return;
10850
10851                         val = cpu_to_le32(val);
10852                         memcpy(tp->fw_ver + i, &val, 4);
10853                 }
10854         }
10855 }
10856
10857 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
10858
10859 static int __devinit tg3_get_invariants(struct tg3 *tp)
10860 {
10861         static struct pci_device_id write_reorder_chipsets[] = {
10862                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10863                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10864                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10865                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10866                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10867                              PCI_DEVICE_ID_VIA_8385_0) },
10868                 { },
10869         };
10870         u32 misc_ctrl_reg;
10871         u32 cacheline_sz_reg;
10872         u32 pci_state_reg, grc_misc_cfg;
10873         u32 val;
10874         u16 pci_cmd;
10875         int err, pcie_cap;
10876
10877         /* Force memory write invalidate off.  If we leave it on,
10878          * then on 5700_BX chips we have to enable a workaround.
10879          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10880          * to match the cacheline size.  The Broadcom driver have this
10881          * workaround but turns MWI off all the times so never uses
10882          * it.  This seems to suggest that the workaround is insufficient.
10883          */
10884         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10885         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10886         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10887
10888         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10889          * has the register indirect write enable bit set before
10890          * we try to access any of the MMIO registers.  It is also
10891          * critical that the PCI-X hw workaround situation is decided
10892          * before that as well.
10893          */
10894         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10895                               &misc_ctrl_reg);
10896
10897         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10898                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10899         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
10900                 u32 prod_id_asic_rev;
10901
10902                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
10903                                       &prod_id_asic_rev);
10904                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
10905         }
10906
10907         /* Wrong chip ID in 5752 A0. This code can be removed later
10908          * as A0 is not in production.
10909          */
10910         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10911                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10912
10913         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10914          * we need to disable memory and use config. cycles
10915          * only to access all registers. The 5702/03 chips
10916          * can mistakenly decode the special cycles from the
10917          * ICH chipsets as memory write cycles, causing corruption
10918          * of register and memory space. Only certain ICH bridges
10919          * will drive special cycles with non-zero data during the
10920          * address phase which can fall within the 5703's address
10921          * range. This is not an ICH bug as the PCI spec allows
10922          * non-zero address during special cycles. However, only
10923          * these ICH bridges are known to drive non-zero addresses
10924          * during special cycles.
10925          *
10926          * Since special cycles do not cross PCI bridges, we only
10927          * enable this workaround if the 5703 is on the secondary
10928          * bus of these ICH bridges.
10929          */
10930         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10931             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10932                 static struct tg3_dev_id {
10933                         u32     vendor;
10934                         u32     device;
10935                         u32     rev;
10936                 } ich_chipsets[] = {
10937                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10938                           PCI_ANY_ID },
10939                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10940                           PCI_ANY_ID },
10941                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10942                           0xa },
10943                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10944                           PCI_ANY_ID },
10945                         { },
10946                 };
10947                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10948                 struct pci_dev *bridge = NULL;
10949
10950                 while (pci_id->vendor != 0) {
10951                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10952                                                 bridge);
10953                         if (!bridge) {
10954                                 pci_id++;
10955                                 continue;
10956                         }
10957                         if (pci_id->rev != PCI_ANY_ID) {
10958                                 if (bridge->revision > pci_id->rev)
10959                                         continue;
10960                         }
10961                         if (bridge->subordinate &&
10962                             (bridge->subordinate->number ==
10963                              tp->pdev->bus->number)) {
10964
10965                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10966                                 pci_dev_put(bridge);
10967                                 break;
10968                         }
10969                 }
10970         }
10971
10972         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10973          * DMA addresses > 40-bit. This bridge may have other additional
10974          * 57xx devices behind it in some 4-port NIC designs for example.
10975          * Any tg3 device found behind the bridge will also need the 40-bit
10976          * DMA workaround.
10977          */
10978         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10979             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10980                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10981                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10982                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10983         }
10984         else {
10985                 struct pci_dev *bridge = NULL;
10986
10987                 do {
10988                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10989                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10990                                                 bridge);
10991                         if (bridge && bridge->subordinate &&
10992                             (bridge->subordinate->number <=
10993                              tp->pdev->bus->number) &&
10994                             (bridge->subordinate->subordinate >=
10995                              tp->pdev->bus->number)) {
10996                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10997                                 pci_dev_put(bridge);
10998                                 break;
10999                         }
11000                 } while (bridge);
11001         }
11002
11003         /* Initialize misc host control in PCI block. */
11004         tp->misc_host_ctrl |= (misc_ctrl_reg &
11005                                MISC_HOST_CTRL_CHIPREV);
11006         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11007                                tp->misc_host_ctrl);
11008
11009         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11010                               &cacheline_sz_reg);
11011
11012         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
11013         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
11014         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
11015         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
11016
11017         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11018             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11019                 tp->pdev_peer = tg3_find_peer(tp);
11020
11021         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11022             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11023             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11024             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11025             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11026             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11027             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11028             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11029                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11030
11031         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11032             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11033                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11034
11035         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11036                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11037                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11038                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11039                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11040                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11041                      tp->pdev_peer == tp->pdev))
11042                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11043
11044                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11045                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11046                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11047                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11048                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11049                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11050                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11051                 } else {
11052                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11053                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11054                                 ASIC_REV_5750 &&
11055                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11056                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11057                 }
11058         }
11059
11060         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11061             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
11062             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11063             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
11064             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
11065             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
11066             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
11067             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11068                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11069
11070         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11071         if (pcie_cap != 0) {
11072                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11073                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11074                         u16 lnkctl;
11075
11076                         pci_read_config_word(tp->pdev,
11077                                              pcie_cap + PCI_EXP_LNKCTL,
11078                                              &lnkctl);
11079                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11080                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11081                 }
11082         }
11083
11084         /* If we have an AMD 762 or VIA K8T800 chipset, write
11085          * reordering to the mailbox registers done by the host
11086          * controller can cause major troubles.  We read back from
11087          * every mailbox register write to force the writes to be
11088          * posted to the chip in order.
11089          */
11090         if (pci_dev_present(write_reorder_chipsets) &&
11091             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11092                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11093
11094         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11095             tp->pci_lat_timer < 64) {
11096                 tp->pci_lat_timer = 64;
11097
11098                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
11099                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
11100                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
11101                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
11102
11103                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11104                                        cacheline_sz_reg);
11105         }
11106
11107         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11108             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11109                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11110                 if (!tp->pcix_cap) {
11111                         printk(KERN_ERR PFX "Cannot find PCI-X "
11112                                             "capability, aborting.\n");
11113                         return -EIO;
11114                 }
11115         }
11116
11117         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11118                               &pci_state_reg);
11119
11120         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
11121                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11122
11123                 /* If this is a 5700 BX chipset, and we are in PCI-X
11124                  * mode, enable register write workaround.
11125                  *
11126                  * The workaround is to use indirect register accesses
11127                  * for all chip writes not to mailbox registers.
11128                  */
11129                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11130                         u32 pm_reg;
11131
11132                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11133
11134                         /* The chip can have it's power management PCI config
11135                          * space registers clobbered due to this bug.
11136                          * So explicitly force the chip into D0 here.
11137                          */
11138                         pci_read_config_dword(tp->pdev,
11139                                               tp->pm_cap + PCI_PM_CTRL,
11140                                               &pm_reg);
11141                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11142                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11143                         pci_write_config_dword(tp->pdev,
11144                                                tp->pm_cap + PCI_PM_CTRL,
11145                                                pm_reg);
11146
11147                         /* Also, force SERR#/PERR# in PCI command. */
11148                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11149                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11150                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11151                 }
11152         }
11153
11154         /* 5700 BX chips need to have their TX producer index mailboxes
11155          * written twice to workaround a bug.
11156          */
11157         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11158                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11159
11160         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11161                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11162         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11163                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11164
11165         /* Chip-specific fixup from Broadcom driver */
11166         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11167             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11168                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11169                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11170         }
11171
11172         /* Default fast path register access methods */
11173         tp->read32 = tg3_read32;
11174         tp->write32 = tg3_write32;
11175         tp->read32_mbox = tg3_read32;
11176         tp->write32_mbox = tg3_write32;
11177         tp->write32_tx_mbox = tg3_write32;
11178         tp->write32_rx_mbox = tg3_write32;
11179
11180         /* Various workaround register access methods */
11181         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11182                 tp->write32 = tg3_write_indirect_reg32;
11183         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11184                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11185                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11186                 /*
11187                  * Back to back register writes can cause problems on these
11188                  * chips, the workaround is to read back all reg writes
11189                  * except those to mailbox regs.
11190                  *
11191                  * See tg3_write_indirect_reg32().
11192                  */
11193                 tp->write32 = tg3_write_flush_reg32;
11194         }
11195
11196
11197         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11198             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11199                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11200                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11201                         tp->write32_rx_mbox = tg3_write_flush_reg32;
11202         }
11203
11204         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11205                 tp->read32 = tg3_read_indirect_reg32;
11206                 tp->write32 = tg3_write_indirect_reg32;
11207                 tp->read32_mbox = tg3_read_indirect_mbox;
11208                 tp->write32_mbox = tg3_write_indirect_mbox;
11209                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11210                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11211
11212                 iounmap(tp->regs);
11213                 tp->regs = NULL;
11214
11215                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11216                 pci_cmd &= ~PCI_COMMAND_MEMORY;
11217                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11218         }
11219         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11220                 tp->read32_mbox = tg3_read32_mbox_5906;
11221                 tp->write32_mbox = tg3_write32_mbox_5906;
11222                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11223                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11224         }
11225
11226         if (tp->write32 == tg3_write_indirect_reg32 ||
11227             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11228              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11229               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11230                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11231
11232         /* Get eeprom hw config before calling tg3_set_power_state().
11233          * In particular, the TG3_FLG2_IS_NIC flag must be
11234          * determined before calling tg3_set_power_state() so that
11235          * we know whether or not to switch out of Vaux power.
11236          * When the flag is set, it means that GPIO1 is used for eeprom
11237          * write protect and also implies that it is a LOM where GPIOs
11238          * are not used to switch power.
11239          */
11240         tg3_get_eeprom_hw_cfg(tp);
11241
11242         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11243                 /* Allow reads and writes to the
11244                  * APE register and memory space.
11245                  */
11246                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11247                                  PCISTATE_ALLOW_APE_SHMEM_WR;
11248                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11249                                        pci_state_reg);
11250         }
11251
11252         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11253             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11254                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11255
11256         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11257          * GPIO1 driven high will bring 5700's external PHY out of reset.
11258          * It is also used as eeprom write protect on LOMs.
11259          */
11260         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11261         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11262             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11263                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11264                                        GRC_LCLCTRL_GPIO_OUTPUT1);
11265         /* Unused GPIO3 must be driven as output on 5752 because there
11266          * are no pull-up resistors on unused GPIO pins.
11267          */
11268         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11269                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
11270
11271         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11272                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11273
11274         /* Force the chip into D0. */
11275         err = tg3_set_power_state(tp, PCI_D0);
11276         if (err) {
11277                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11278                        pci_name(tp->pdev));
11279                 return err;
11280         }
11281
11282         /* 5700 B0 chips do not support checksumming correctly due
11283          * to hardware bugs.
11284          */
11285         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11286                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11287
11288         /* Derive initial jumbo mode from MTU assigned in
11289          * ether_setup() via the alloc_etherdev() call
11290          */
11291         if (tp->dev->mtu > ETH_DATA_LEN &&
11292             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11293                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
11294
11295         /* Determine WakeOnLan speed to use. */
11296         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11297             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11298             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11299             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11300                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11301         } else {
11302                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11303         }
11304
11305         /* A few boards don't want Ethernet@WireSpeed phy feature */
11306         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11307             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11308              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
11309              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
11310             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
11311             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
11312                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11313
11314         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11315             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11316                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11317         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11318                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11319
11320         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11321                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11322                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11323                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11324                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11325                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11326                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11327                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11328                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11329                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11330                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11331                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11332         }
11333
11334         tp->coalesce_mode = 0;
11335         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11336             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11337                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11338
11339         /* Initialize MAC MI mode, polling disabled. */
11340         tw32_f(MAC_MI_MODE, tp->mi_mode);
11341         udelay(80);
11342
11343         /* Initialize data/descriptor byte/word swapping. */
11344         val = tr32(GRC_MODE);
11345         val &= GRC_MODE_HOST_STACKUP;
11346         tw32(GRC_MODE, val | tp->grc_mode);
11347
11348         tg3_switch_clocks(tp);
11349
11350         /* Clear this out for sanity. */
11351         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11352
11353         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11354                               &pci_state_reg);
11355         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11356             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11357                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11358
11359                 if (chiprevid == CHIPREV_ID_5701_A0 ||
11360                     chiprevid == CHIPREV_ID_5701_B0 ||
11361                     chiprevid == CHIPREV_ID_5701_B2 ||
11362                     chiprevid == CHIPREV_ID_5701_B5) {
11363                         void __iomem *sram_base;
11364
11365                         /* Write some dummy words into the SRAM status block
11366                          * area, see if it reads back correctly.  If the return
11367                          * value is bad, force enable the PCIX workaround.
11368                          */
11369                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11370
11371                         writel(0x00000000, sram_base);
11372                         writel(0x00000000, sram_base + 4);
11373                         writel(0xffffffff, sram_base + 4);
11374                         if (readl(sram_base) != 0x00000000)
11375                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11376                 }
11377         }
11378
11379         udelay(50);
11380         tg3_nvram_init(tp);
11381
11382         grc_misc_cfg = tr32(GRC_MISC_CFG);
11383         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11384
11385         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11386             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11387              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11388                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11389
11390         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11391             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11392                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11393         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11394                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11395                                       HOSTCC_MODE_CLRTICK_TXBD);
11396
11397                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11398                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11399                                        tp->misc_host_ctrl);
11400         }
11401
11402         /* these are limited to 10/100 only */
11403         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11404              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11405             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11406              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11407              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11408               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11409               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11410             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11411              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
11412               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11413               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
11414             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11415                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11416
11417         err = tg3_phy_probe(tp);
11418         if (err) {
11419                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11420                        pci_name(tp->pdev), err);
11421                 /* ... but do not return immediately ... */
11422         }
11423
11424         tg3_read_partno(tp);
11425         tg3_read_fw_ver(tp);
11426
11427         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11428                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11429         } else {
11430                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11431                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11432                 else
11433                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11434         }
11435
11436         /* 5700 {AX,BX} chips have a broken status block link
11437          * change bit implementation, so we must use the
11438          * status register in those cases.
11439          */
11440         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11441                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11442         else
11443                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11444
11445         /* The led_ctrl is set during tg3_phy_probe, here we might
11446          * have to force the link status polling mechanism based
11447          * upon subsystem IDs.
11448          */
11449         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
11450             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11451             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11452                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11453                                   TG3_FLAG_USE_LINKCHG_REG);
11454         }
11455
11456         /* For all SERDES we poll the MAC status register. */
11457         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11458                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11459         else
11460                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11461
11462         /* All chips before 5787 can get confused if TX buffers
11463          * straddle the 4GB address boundary in some cases.
11464          */
11465         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11466             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11467             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11468             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11469             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11470                 tp->dev->hard_start_xmit = tg3_start_xmit;
11471         else
11472                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
11473
11474         tp->rx_offset = 2;
11475         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11476             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11477                 tp->rx_offset = 0;
11478
11479         tp->rx_std_max_post = TG3_RX_RING_SIZE;
11480
11481         /* Increment the rx prod index on the rx std ring by at most
11482          * 8 for these chips to workaround hw errata.
11483          */
11484         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11485             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11486             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11487                 tp->rx_std_max_post = 8;
11488
11489         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11490                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11491                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
11492
11493         return err;
11494 }
11495
11496 #ifdef CONFIG_SPARC
11497 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11498 {
11499         struct net_device *dev = tp->dev;
11500         struct pci_dev *pdev = tp->pdev;
11501         struct device_node *dp = pci_device_to_OF_node(pdev);
11502         const unsigned char *addr;
11503         int len;
11504
11505         addr = of_get_property(dp, "local-mac-address", &len);
11506         if (addr && len == 6) {
11507                 memcpy(dev->dev_addr, addr, 6);
11508                 memcpy(dev->perm_addr, dev->dev_addr, 6);
11509                 return 0;
11510         }
11511         return -ENODEV;
11512 }
11513
11514 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11515 {
11516         struct net_device *dev = tp->dev;
11517
11518         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
11519         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
11520         return 0;
11521 }
11522 #endif
11523
11524 static int __devinit tg3_get_device_address(struct tg3 *tp)
11525 {
11526         struct net_device *dev = tp->dev;
11527         u32 hi, lo, mac_offset;
11528         int addr_ok = 0;
11529
11530 #ifdef CONFIG_SPARC
11531         if (!tg3_get_macaddr_sparc(tp))
11532                 return 0;
11533 #endif
11534
11535         mac_offset = 0x7c;
11536         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11537             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11538                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11539                         mac_offset = 0xcc;
11540                 if (tg3_nvram_lock(tp))
11541                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11542                 else
11543                         tg3_nvram_unlock(tp);
11544         }
11545         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11546                 mac_offset = 0x10;
11547
11548         /* First try to get it from MAC address mailbox. */
11549         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
11550         if ((hi >> 16) == 0x484b) {
11551                 dev->dev_addr[0] = (hi >>  8) & 0xff;
11552                 dev->dev_addr[1] = (hi >>  0) & 0xff;
11553
11554                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11555                 dev->dev_addr[2] = (lo >> 24) & 0xff;
11556                 dev->dev_addr[3] = (lo >> 16) & 0xff;
11557                 dev->dev_addr[4] = (lo >>  8) & 0xff;
11558                 dev->dev_addr[5] = (lo >>  0) & 0xff;
11559
11560                 /* Some old bootcode may report a 0 MAC address in SRAM */
11561                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11562         }
11563         if (!addr_ok) {
11564                 /* Next, try NVRAM. */
11565                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
11566                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11567                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
11568                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
11569                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
11570                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
11571                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
11572                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
11573                 }
11574                 /* Finally just fetch it out of the MAC control regs. */
11575                 else {
11576                         hi = tr32(MAC_ADDR_0_HIGH);
11577                         lo = tr32(MAC_ADDR_0_LOW);
11578
11579                         dev->dev_addr[5] = lo & 0xff;
11580                         dev->dev_addr[4] = (lo >> 8) & 0xff;
11581                         dev->dev_addr[3] = (lo >> 16) & 0xff;
11582                         dev->dev_addr[2] = (lo >> 24) & 0xff;
11583                         dev->dev_addr[1] = hi & 0xff;
11584                         dev->dev_addr[0] = (hi >> 8) & 0xff;
11585                 }
11586         }
11587
11588         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
11589 #ifdef CONFIG_SPARC64
11590                 if (!tg3_get_default_macaddr_sparc(tp))
11591                         return 0;
11592 #endif
11593                 return -EINVAL;
11594         }
11595         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
11596         return 0;
11597 }
11598
11599 #define BOUNDARY_SINGLE_CACHELINE       1
11600 #define BOUNDARY_MULTI_CACHELINE        2
11601
11602 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11603 {
11604         int cacheline_size;
11605         u8 byte;
11606         int goal;
11607
11608         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11609         if (byte == 0)
11610                 cacheline_size = 1024;
11611         else
11612                 cacheline_size = (int) byte * 4;
11613
11614         /* On 5703 and later chips, the boundary bits have no
11615          * effect.
11616          */
11617         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11618             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11619             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11620                 goto out;
11621
11622 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11623         goal = BOUNDARY_MULTI_CACHELINE;
11624 #else
11625 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11626         goal = BOUNDARY_SINGLE_CACHELINE;
11627 #else
11628         goal = 0;
11629 #endif
11630 #endif
11631
11632         if (!goal)
11633                 goto out;
11634
11635         /* PCI controllers on most RISC systems tend to disconnect
11636          * when a device tries to burst across a cache-line boundary.
11637          * Therefore, letting tg3 do so just wastes PCI bandwidth.
11638          *
11639          * Unfortunately, for PCI-E there are only limited
11640          * write-side controls for this, and thus for reads
11641          * we will still get the disconnects.  We'll also waste
11642          * these PCI cycles for both read and write for chips
11643          * other than 5700 and 5701 which do not implement the
11644          * boundary bits.
11645          */
11646         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11647             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11648                 switch (cacheline_size) {
11649                 case 16:
11650                 case 32:
11651                 case 64:
11652                 case 128:
11653                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11654                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11655                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11656                         } else {
11657                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11658                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11659                         }
11660                         break;
11661
11662                 case 256:
11663                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11664                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11665                         break;
11666
11667                 default:
11668                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11669                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11670                         break;
11671                 };
11672         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11673                 switch (cacheline_size) {
11674                 case 16:
11675                 case 32:
11676                 case 64:
11677                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11678                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11679                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11680                                 break;
11681                         }
11682                         /* fallthrough */
11683                 case 128:
11684                 default:
11685                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11686                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11687                         break;
11688                 };
11689         } else {
11690                 switch (cacheline_size) {
11691                 case 16:
11692                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11693                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11694                                         DMA_RWCTRL_WRITE_BNDRY_16);
11695                                 break;
11696                         }
11697                         /* fallthrough */
11698                 case 32:
11699                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11700                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11701                                         DMA_RWCTRL_WRITE_BNDRY_32);
11702                                 break;
11703                         }
11704                         /* fallthrough */
11705                 case 64:
11706                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11707                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11708                                         DMA_RWCTRL_WRITE_BNDRY_64);
11709                                 break;
11710                         }
11711                         /* fallthrough */
11712                 case 128:
11713                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11714                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11715                                         DMA_RWCTRL_WRITE_BNDRY_128);
11716                                 break;
11717                         }
11718                         /* fallthrough */
11719                 case 256:
11720                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
11721                                 DMA_RWCTRL_WRITE_BNDRY_256);
11722                         break;
11723                 case 512:
11724                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
11725                                 DMA_RWCTRL_WRITE_BNDRY_512);
11726                         break;
11727                 case 1024:
11728                 default:
11729                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11730                                 DMA_RWCTRL_WRITE_BNDRY_1024);
11731                         break;
11732                 };
11733         }
11734
11735 out:
11736         return val;
11737 }
11738
/* Run one host<->NIC DMA transfer of @size bytes through the chip's
 * internal DMA descriptor machinery, used to probe for DMA bugs.
 *
 * @tp:        device state
 * @buf:       host-side test buffer (coherent DMA memory)
 * @buf_dma:   bus address of @buf
 * @size:      transfer length in bytes
 * @to_device: non-zero for host->NIC (read DMA engine), zero for
 *             NIC->host (write DMA engine)
 *
 * Returns 0 when the descriptor shows up on the completion FIFO within
 * the polling window (40 * 100us), -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the completion FIFOs, DMA status and buffer manager
	 * before starting the test transfer.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build an internal buffer descriptor pointing at @buf. */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor word-by-word into NIC SRAM through the
	 * indirect PCI memory window registers.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor address. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the matching completion FIFO for the descriptor. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
11819
11820 #define TEST_BUFFER_SIZE        0x2000
11821
/* Probe for host DMA problems and program TG3PCI_DMA_RW_CTRL.
 *
 * Computes a candidate DMA read/write control value for the chip's bus
 * type, then — on 5700/5701 only, which have a known write-DMA bug —
 * performs an actual write + read-back DMA loop against a scratch
 * buffer, tightening the write boundary setting if corruption is seen.
 *
 * Returns 0 on success, -ENOMEM if the scratch buffer cannot be
 * allocated, or -ENODEV if the DMA test cannot be made to pass.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Default PCI read/write command watermarks. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		/* Conventional PCI: watermarks depend on ASIC revision. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		/* PCI-X mode: per-chip watermark and workaround settings. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble of the control word. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 exhibit the write DMA bug; other chips are done. */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		/* Fill the scratch buffer with a known pattern. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			/* Corruption: tighten the write boundary to 16 and
			 * rerun the test once; if already at 16, give up.
			 */
			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
12008
12009 static void __devinit tg3_init_link_config(struct tg3 *tp)
12010 {
12011         tp->link_config.advertising =
12012                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12013                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12014                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12015                  ADVERTISED_Autoneg | ADVERTISED_MII);
12016         tp->link_config.speed = SPEED_INVALID;
12017         tp->link_config.duplex = DUPLEX_INVALID;
12018         tp->link_config.autoneg = AUTONEG_ENABLE;
12019         tp->link_config.active_speed = SPEED_INVALID;
12020         tp->link_config.active_duplex = DUPLEX_INVALID;
12021         tp->link_config.phy_is_low_power = 0;
12022         tp->link_config.orig_speed = SPEED_INVALID;
12023         tp->link_config.orig_duplex = DUPLEX_INVALID;
12024         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12025 }
12026
12027 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12028 {
12029         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12030                 tp->bufmgr_config.mbuf_read_dma_low_water =
12031                         DEFAULT_MB_RDMA_LOW_WATER_5705;
12032                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12033                         DEFAULT_MB_MACRX_LOW_WATER_5705;
12034                 tp->bufmgr_config.mbuf_high_water =
12035                         DEFAULT_MB_HIGH_WATER_5705;
12036                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12037                         tp->bufmgr_config.mbuf_mac_rx_low_water =
12038                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
12039                         tp->bufmgr_config.mbuf_high_water =
12040                                 DEFAULT_MB_HIGH_WATER_5906;
12041                 }
12042
12043                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12044                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12045                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12046                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12047                 tp->bufmgr_config.mbuf_high_water_jumbo =
12048                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12049         } else {
12050                 tp->bufmgr_config.mbuf_read_dma_low_water =
12051                         DEFAULT_MB_RDMA_LOW_WATER;
12052                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12053                         DEFAULT_MB_MACRX_LOW_WATER;
12054                 tp->bufmgr_config.mbuf_high_water =
12055                         DEFAULT_MB_HIGH_WATER;
12056
12057                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12058                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12059                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12060                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12061                 tp->bufmgr_config.mbuf_high_water_jumbo =
12062                         DEFAULT_MB_HIGH_WATER_JUMBO;
12063         }
12064
12065         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12066         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12067 }
12068
12069 static char * __devinit tg3_phy_string(struct tg3 *tp)
12070 {
12071         switch (tp->phy_id & PHY_ID_MASK) {
12072         case PHY_ID_BCM5400:    return "5400";
12073         case PHY_ID_BCM5401:    return "5401";
12074         case PHY_ID_BCM5411:    return "5411";
12075         case PHY_ID_BCM5701:    return "5701";
12076         case PHY_ID_BCM5703:    return "5703";
12077         case PHY_ID_BCM5704:    return "5704";
12078         case PHY_ID_BCM5705:    return "5705";
12079         case PHY_ID_BCM5750:    return "5750";
12080         case PHY_ID_BCM5752:    return "5752";
12081         case PHY_ID_BCM5714:    return "5714";
12082         case PHY_ID_BCM5780:    return "5780";
12083         case PHY_ID_BCM5755:    return "5755";
12084         case PHY_ID_BCM5787:    return "5787";
12085         case PHY_ID_BCM5784:    return "5784";
12086         case PHY_ID_BCM5756:    return "5722/5756";
12087         case PHY_ID_BCM5906:    return "5906";
12088         case PHY_ID_BCM5761:    return "5761";
12089         case PHY_ID_BCM8002:    return "8002/serdes";
12090         case 0:                 return "serdes";
12091         default:                return "unknown";
12092         };
12093 }
12094
12095 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12096 {
12097         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12098                 strcpy(str, "PCI Express");
12099                 return str;
12100         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12101                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12102
12103                 strcpy(str, "PCIX:");
12104
12105                 if ((clock_ctrl == 7) ||
12106                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12107                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12108                         strcat(str, "133MHz");
12109                 else if (clock_ctrl == 0)
12110                         strcat(str, "33MHz");
12111                 else if (clock_ctrl == 2)
12112                         strcat(str, "50MHz");
12113                 else if (clock_ctrl == 4)
12114                         strcat(str, "66MHz");
12115                 else if (clock_ctrl == 6)
12116                         strcat(str, "100MHz");
12117         } else {
12118                 strcpy(str, "PCI:");
12119                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12120                         strcat(str, "66MHz");
12121                 else
12122                         strcat(str, "33MHz");
12123         }
12124         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12125                 strcat(str, ":32-bit");
12126         else
12127                 strcat(str, ":64-bit");
12128         return str;
12129 }
12130
12131 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
12132 {
12133         struct pci_dev *peer;
12134         unsigned int func, devnr = tp->pdev->devfn & ~7;
12135
12136         for (func = 0; func < 8; func++) {
12137                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12138                 if (peer && peer != tp->pdev)
12139                         break;
12140                 pci_dev_put(peer);
12141         }
12142         /* 5704 can be configured in single-port mode, set peer to
12143          * tp->pdev in that case.
12144          */
12145         if (!peer) {
12146                 peer = tp->pdev;
12147                 return peer;
12148         }
12149
12150         /*
12151          * We don't need to keep the refcount elevated; there's no way
12152          * to remove one half of this device without removing the other
12153          */
12154         pci_dev_put(peer);
12155
12156         return peer;
12157 }
12158
12159 static void __devinit tg3_init_coal(struct tg3 *tp)
12160 {
12161         struct ethtool_coalesce *ec = &tp->coal;
12162
12163         memset(ec, 0, sizeof(*ec));
12164         ec->cmd = ETHTOOL_GCOALESCE;
12165         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12166         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12167         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12168         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12169         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12170         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12171         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12172         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12173         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12174
12175         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12176                                  HOSTCC_MODE_CLRTICK_TXBD)) {
12177                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12178                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12179                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12180                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12181         }
12182
12183         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12184                 ec->rx_coalesce_usecs_irq = 0;
12185                 ec->tx_coalesce_usecs_irq = 0;
12186                 ec->stats_block_coalesce_usecs = 0;
12187         }
12188 }
12189
12190 static int __devinit tg3_init_one(struct pci_dev *pdev,
12191                                   const struct pci_device_id *ent)
12192 {
12193         static int tg3_version_printed = 0;
12194         unsigned long tg3reg_base, tg3reg_len;
12195         struct net_device *dev;
12196         struct tg3 *tp;
12197         int i, err, pm_cap;
12198         char str[40];
12199         u64 dma_mask, persist_dma_mask;
12200
12201         if (tg3_version_printed++ == 0)
12202                 printk(KERN_INFO "%s", version);
12203
12204         err = pci_enable_device(pdev);
12205         if (err) {
12206                 printk(KERN_ERR PFX "Cannot enable PCI device, "
12207                        "aborting.\n");
12208                 return err;
12209         }
12210
12211         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12212                 printk(KERN_ERR PFX "Cannot find proper PCI device "
12213                        "base address, aborting.\n");
12214                 err = -ENODEV;
12215                 goto err_out_disable_pdev;
12216         }
12217
12218         err = pci_request_regions(pdev, DRV_MODULE_NAME);
12219         if (err) {
12220                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
12221                        "aborting.\n");
12222                 goto err_out_disable_pdev;
12223         }
12224
12225         pci_set_master(pdev);
12226
12227         /* Find power-management capability. */
12228         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12229         if (pm_cap == 0) {
12230                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
12231                        "aborting.\n");
12232                 err = -EIO;
12233                 goto err_out_free_res;
12234         }
12235
12236         tg3reg_base = pci_resource_start(pdev, 0);
12237         tg3reg_len = pci_resource_len(pdev, 0);
12238
12239         dev = alloc_etherdev(sizeof(*tp));
12240         if (!dev) {
12241                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
12242                 err = -ENOMEM;
12243                 goto err_out_free_res;
12244         }
12245
12246         SET_NETDEV_DEV(dev, &pdev->dev);
12247
12248 #if TG3_VLAN_TAG_USED
12249         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
12250         dev->vlan_rx_register = tg3_vlan_rx_register;
12251 #endif
12252
12253         tp = netdev_priv(dev);
12254         tp->pdev = pdev;
12255         tp->dev = dev;
12256         tp->pm_cap = pm_cap;
12257         tp->mac_mode = TG3_DEF_MAC_MODE;
12258         tp->rx_mode = TG3_DEF_RX_MODE;
12259         tp->tx_mode = TG3_DEF_TX_MODE;
12260         tp->mi_mode = MAC_MI_MODE_BASE;
12261         if (tg3_debug > 0)
12262                 tp->msg_enable = tg3_debug;
12263         else
12264                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
12265
12266         /* The word/byte swap controls here control register access byte
12267          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
12268          * setting below.
12269          */
12270         tp->misc_host_ctrl =
12271                 MISC_HOST_CTRL_MASK_PCI_INT |
12272                 MISC_HOST_CTRL_WORD_SWAP |
12273                 MISC_HOST_CTRL_INDIR_ACCESS |
12274                 MISC_HOST_CTRL_PCISTATE_RW;
12275
12276         /* The NONFRM (non-frame) byte/word swap controls take effect
12277          * on descriptor entries, anything which isn't packet data.
12278          *
12279          * The StrongARM chips on the board (one for tx, one for rx)
12280          * are running in big-endian mode.
12281          */
12282         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
12283                         GRC_MODE_WSWAP_NONFRM_DATA);
12284 #ifdef __BIG_ENDIAN
12285         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
12286 #endif
12287         spin_lock_init(&tp->lock);
12288         spin_lock_init(&tp->indirect_lock);
12289         INIT_WORK(&tp->reset_task, tg3_reset_task);
12290
12291         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
12292         if (!tp->regs) {
12293                 printk(KERN_ERR PFX "Cannot map device registers, "
12294                        "aborting.\n");
12295                 err = -ENOMEM;
12296                 goto err_out_free_dev;
12297         }
12298
12299         tg3_init_link_config(tp);
12300
12301         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
12302         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
12303         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
12304
12305         dev->open = tg3_open;
12306         dev->stop = tg3_close;
12307         dev->get_stats = tg3_get_stats;
12308         dev->set_multicast_list = tg3_set_rx_mode;
12309         dev->set_mac_address = tg3_set_mac_addr;
12310         dev->do_ioctl = tg3_ioctl;
12311         dev->tx_timeout = tg3_tx_timeout;
12312         netif_napi_add(dev, &tp->napi, tg3_poll, 64);
12313         dev->ethtool_ops = &tg3_ethtool_ops;
12314         dev->watchdog_timeo = TG3_TX_TIMEOUT;
12315         dev->change_mtu = tg3_change_mtu;
12316         dev->irq = pdev->irq;
12317 #ifdef CONFIG_NET_POLL_CONTROLLER
12318         dev->poll_controller = tg3_poll_controller;
12319 #endif
12320
12321         err = tg3_get_invariants(tp);
12322         if (err) {
12323                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
12324                        "aborting.\n");
12325                 goto err_out_iounmap;
12326         }
12327
12328         /* The EPB bridge inside 5714, 5715, and 5780 and any
12329          * device behind the EPB cannot support DMA addresses > 40-bit.
12330          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
12331          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
12332          * do DMA address check in tg3_start_xmit().
12333          */
12334         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
12335                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
12336         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
12337                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
12338 #ifdef CONFIG_HIGHMEM
12339                 dma_mask = DMA_64BIT_MASK;
12340 #endif
12341         } else
12342                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
12343
12344         /* Configure DMA attributes. */
12345         if (dma_mask > DMA_32BIT_MASK) {
12346                 err = pci_set_dma_mask(pdev, dma_mask);
12347                 if (!err) {
12348                         dev->features |= NETIF_F_HIGHDMA;
12349                         err = pci_set_consistent_dma_mask(pdev,
12350                                                           persist_dma_mask);
12351                         if (err < 0) {
12352                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
12353                                        "DMA for consistent allocations\n");
12354                                 goto err_out_iounmap;
12355                         }
12356                 }
12357         }
12358         if (err || dma_mask == DMA_32BIT_MASK) {
12359                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
12360                 if (err) {
12361                         printk(KERN_ERR PFX "No usable DMA configuration, "
12362                                "aborting.\n");
12363                         goto err_out_iounmap;
12364                 }
12365         }
12366
12367         tg3_init_bufmgr_config(tp);
12368
12369         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12370                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
12371         }
12372         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12373             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12374             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
12375             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12376             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
12377                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
12378         } else {
12379                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
12380         }
12381
12382         /* TSO is on by default on chips that support hardware TSO.
12383          * Firmware TSO on older chips gives lower performance, so it
12384          * is off by default, but can be enabled using ethtool.
12385          */
12386         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12387                 dev->features |= NETIF_F_TSO;
12388                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12389                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
12390                         dev->features |= NETIF_F_TSO6;
12391                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12392                         dev->features |= NETIF_F_TSO_ECN;
12393         }
12394
12395
12396         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
12397             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
12398             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
12399                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
12400                 tp->rx_pending = 63;
12401         }
12402
12403         err = tg3_get_device_address(tp);
12404         if (err) {
12405                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
12406                        "aborting.\n");
12407                 goto err_out_iounmap;
12408         }
12409
12410         /*
12411          * Reset chip in case UNDI or EFI driver did not shutdown
12412          * DMA self test will enable WDMAC and we'll see (spurious)
12413          * pending DMA on the PCI bus at that point.
12414          */
12415         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
12416             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
12417                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
12418                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12419         }
12420
12421         err = tg3_test_dma(tp);
12422         if (err) {
12423                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
12424                 goto err_out_iounmap;
12425         }
12426
12427         /* Tigon3 can do ipv4 only... and some chips have buggy
12428          * checksumming.
12429          */
12430         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
12431                 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12432                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12433                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12434                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12435                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12436                         dev->features |= NETIF_F_IPV6_CSUM;
12437
12438                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12439         } else
12440                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
12441
12442         /* flow control autonegotiation is default behavior */
12443         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
12444
12445         tg3_init_coal(tp);
12446
12447         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12448                 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12449                         printk(KERN_ERR PFX "Cannot find proper PCI device "
12450                                "base address for APE, aborting.\n");
12451                         err = -ENODEV;
12452                         goto err_out_iounmap;
12453                 }
12454
12455                 tg3reg_base = pci_resource_start(pdev, 2);
12456                 tg3reg_len = pci_resource_len(pdev, 2);
12457
12458                 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
12459                 if (tp->aperegs == 0UL) {
12460                         printk(KERN_ERR PFX "Cannot map APE registers, "
12461                                "aborting.\n");
12462                         err = -ENOMEM;
12463                         goto err_out_iounmap;
12464                 }
12465
12466                 tg3_ape_lock_init(tp);
12467         }
12468
12469         pci_set_drvdata(pdev, dev);
12470
12471         err = register_netdev(dev);
12472         if (err) {
12473                 printk(KERN_ERR PFX "Cannot register net device, "
12474                        "aborting.\n");
12475                 goto err_out_apeunmap;
12476         }
12477
12478         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
12479                dev->name,
12480                tp->board_part_number,
12481                tp->pci_chip_rev_id,
12482                tg3_phy_string(tp),
12483                tg3_bus_string(tp, str),
12484                ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
12485                 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
12486                  "10/100/1000Base-T")));
12487
12488         for (i = 0; i < 6; i++)
12489                 printk("%2.2x%c", dev->dev_addr[i],
12490                        i == 5 ? '\n' : ':');
12491
12492         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
12493                "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
12494                dev->name,
12495                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
12496                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
12497                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
12498                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
12499                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
12500                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
12501         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
12502                dev->name, tp->dma_rwctrl,
12503                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
12504                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
12505
12506         return 0;
12507
12508 err_out_apeunmap:
12509         if (tp->aperegs) {
12510                 iounmap(tp->aperegs);
12511                 tp->aperegs = NULL;
12512         }
12513
12514 err_out_iounmap:
12515         if (tp->regs) {
12516                 iounmap(tp->regs);
12517                 tp->regs = NULL;
12518         }
12519
12520 err_out_free_dev:
12521         free_netdev(dev);
12522
12523 err_out_free_res:
12524         pci_release_regions(pdev);
12525
12526 err_out_disable_pdev:
12527         pci_disable_device(pdev);
12528         pci_set_drvdata(pdev, NULL);
12529         return err;
12530 }
12531
12532 static void __devexit tg3_remove_one(struct pci_dev *pdev)
12533 {
12534         struct net_device *dev = pci_get_drvdata(pdev);
12535
12536         if (dev) {
12537                 struct tg3 *tp = netdev_priv(dev);
12538
12539                 flush_scheduled_work();
12540                 unregister_netdev(dev);
12541                 if (tp->aperegs) {
12542                         iounmap(tp->aperegs);
12543                         tp->aperegs = NULL;
12544                 }
12545                 if (tp->regs) {
12546                         iounmap(tp->regs);
12547                         tp->regs = NULL;
12548                 }
12549                 free_netdev(dev);
12550                 pci_release_regions(pdev);
12551                 pci_disable_device(pdev);
12552                 pci_set_drvdata(pdev, NULL);
12553         }
12554 }
12555
/* PCI power-management suspend callback.
 *
 * Returns 0 on success or a negative errno.  If switching to the
 * requested low-power state fails, the chip is restarted and the
 * interface re-attached before the error is returned, so the device
 * stays usable.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	/* Interface down: no DMA, timer or irq activity to quiesce. */
	if (!netif_running(dev))
		return 0;

	/* Make sure a queued reset_task cannot race with the suspend. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Halt the chip with INIT_COMPLETE cleared before dropping power. */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Could not enter the low-power state: undo the shutdown
		 * (restart hw, timer and netif) and report the failure.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
12607
/* PCI power-management resume callback: restore config space, bring
 * the chip back to D0 and, if the interface was up at suspend time,
 * re-initialize the hardware and restart the periodic timer.
 *
 * Returns 0 on success or a negative errno from the power-state
 * change or hardware restart.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* NOTE(review): uses tp->pdev rather than the pdev argument --
	 * presumably the same device; confirm against tg3_init_one.
	 */
	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	/* Hardware bug - MSI won't work if INTX disabled. */
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		pci_intx(tp->pdev, 1);

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}
12647
/* PCI driver glue: matches the supported Tigon3 device IDs and wires
 * the probe/remove and power-management hooks into the PCI core.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
12656
12657 static int __init tg3_init(void)
12658 {
12659         return pci_register_driver(&tg3_driver);
12660 }
12661
12662 static void __exit tg3_cleanup(void)
12663 {
12664         pci_unregister_driver(&tg3_driver);
12665 }
12666
/* Register the driver at module load, unregister it at unload. */
module_init(tg3_init);
module_exit(tg3_cleanup);