/* [TG3]: Update version to 3.83
 * Source: pandora-kernel.git / drivers / net / tg3.c
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43 #include <net/ip.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC
51 #include <asm/idprom.h>
52 #include <asm/prom.h>
53 #endif
54
/* Compile VLAN tag support in only when the 802.1Q layer is available. */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define TG3_TSO_SUPPORT 1

#include "tg3.h"

#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.83"
#define DRV_MODULE_RELDATE      "October 10, 2007"

/* Power-on defaults for the MAC/RX/TX mode registers. */
#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
/* Default NETIF_MSG_* bitmap used when the tg3_debug parameter is -1. */
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

/* Byte sizes of the descriptor rings, used for DMA allocation. */
#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
/* Ring-size is a power of two, so advance with a mask instead of '%'. */
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

/* RX buffer sizes: payload + receive offset + slack. */
#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST            6
134
/* Banner printed once when the module loads. */
static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Bitmap of NETIF_MSG_* categories to enable; -1 selects the default set. */
static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
/* PCI IDs this driver claims: Broadcom Tigon3 variants plus SysKonnect,
 * Altima and Apple boards built on the same silicon.  The empty entry
 * terminates the table.
 */
static struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
216
/* 'ethtool -S' labels, one per u64 counter.  TG3_NUM_STATS is derived from
 * sizeof(struct tg3_ethtool_stats), so the entry order presumably mirrors
 * that structure's layout -- verify against tg3.h before reordering.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};
297
/* 'ethtool -t' self-test labels (TG3_NUM_TEST entries).  NOTE(review):
 * order presumably matches the result array filled by the self-test
 * code -- confirm against the tg3 self-test implementation.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};
308
309 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
310 {
311         writel(val, tp->regs + off);
312 }
313
314 static u32 tg3_read32(struct tg3 *tp, u32 off)
315 {
316         return (readl(tp->regs + off));
317 }
318
319 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
320 {
321         writel(val, tp->aperegs + off);
322 }
323
324 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
325 {
326         return (readl(tp->aperegs + off));
327 }
328
329 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
330 {
331         unsigned long flags;
332
333         spin_lock_irqsave(&tp->indirect_lock, flags);
334         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
335         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
336         spin_unlock_irqrestore(&tp->indirect_lock, flags);
337 }
338
339 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
340 {
341         writel(val, tp->regs + off);
342         readl(tp->regs + off);
343 }
344
345 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
346 {
347         unsigned long flags;
348         u32 val;
349
350         spin_lock_irqsave(&tp->indirect_lock, flags);
351         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
352         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
353         spin_unlock_irqrestore(&tp->indirect_lock, flags);
354         return val;
355 }
356
357 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
358 {
359         unsigned long flags;
360
361         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
362                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
363                                        TG3_64BIT_REG_LOW, val);
364                 return;
365         }
366         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
367                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
368                                        TG3_64BIT_REG_LOW, val);
369                 return;
370         }
371
372         spin_lock_irqsave(&tp->indirect_lock, flags);
373         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
374         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
375         spin_unlock_irqrestore(&tp->indirect_lock, flags);
376
377         /* In indirect mode when disabling interrupts, we also need
378          * to clear the interrupt bit in the GRC local ctrl register.
379          */
380         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
381             (val == 0x1)) {
382                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
383                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
384         }
385 }
386
387 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
388 {
389         unsigned long flags;
390         u32 val;
391
392         spin_lock_irqsave(&tp->indirect_lock, flags);
393         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
394         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
395         spin_unlock_irqrestore(&tp->indirect_lock, flags);
396         return val;
397 }
398
399 /* usec_wait specifies the wait time in usec when writing to certain registers
400  * where it is unsafe to read back the register without some delay.
401  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
402  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
403  */
404 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
405 {
406         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
407             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
408                 /* Non-posted methods */
409                 tp->write32(tp, off, val);
410         else {
411                 /* Posted method */
412                 tg3_write32(tp, off, val);
413                 if (usec_wait)
414                         udelay(usec_wait);
415                 tp->read32(tp, off);
416         }
417         /* Wait again after the read for the posted method to guarantee that
418          * the wait time is met.
419          */
420         if (usec_wait)
421                 udelay(usec_wait);
422 }
423
424 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
425 {
426         tp->write32_mbox(tp, off, val);
427         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
428             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
429                 tp->read32_mbox(tp, off);
430 }
431
432 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
433 {
434         void __iomem *mbox = tp->regs + off;
435         writel(val, mbox);
436         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
437                 writel(val, mbox);
438         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
439                 readl(mbox);
440 }
441
442 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
443 {
444         return (readl(tp->regs + off + GRCMBOX_BASE));
445 }
446
447 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
448 {
449         writel(val, tp->regs + off + GRCMBOX_BASE);
450 }
451
/* Register access shorthands.  These dispatch through the per-chip
 * method pointers chosen at probe time; the _f variants flush (and
 * optionally wait) after the write.  All assume a local 'tp'.
 */
#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)               tp->read32(tp, reg)
462
463 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
464 {
465         unsigned long flags;
466
467         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
468             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
469                 return;
470
471         spin_lock_irqsave(&tp->indirect_lock, flags);
472         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
473                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
474                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
475
476                 /* Always leave this as zero. */
477                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
478         } else {
479                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
480                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
481
482                 /* Always leave this as zero. */
483                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
484         }
485         spin_unlock_irqrestore(&tp->indirect_lock, flags);
486 }
487
488 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
489 {
490         unsigned long flags;
491
492         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
493             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
494                 *val = 0;
495                 return;
496         }
497
498         spin_lock_irqsave(&tp->indirect_lock, flags);
499         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
500                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
501                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
502
503                 /* Always leave this as zero. */
504                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
505         } else {
506                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
507                 *val = tr32(TG3PCI_MEM_WIN_DATA);
508
509                 /* Always leave this as zero. */
510                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
511         }
512         spin_unlock_irqrestore(&tp->indirect_lock, flags);
513 }
514
515 static void tg3_ape_lock_init(struct tg3 *tp)
516 {
517         int i;
518
519         /* Make sure the driver hasn't any stale locks. */
520         for (i = 0; i < 8; i++)
521                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
522                                 APE_LOCK_GRANT_DRIVER);
523 }
524
525 static int tg3_ape_lock(struct tg3 *tp, int locknum)
526 {
527         int i, off;
528         int ret = 0;
529         u32 status;
530
531         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
532                 return 0;
533
534         switch (locknum) {
535                 case TG3_APE_LOCK_MEM:
536                         break;
537                 default:
538                         return -EINVAL;
539         }
540
541         off = 4 * locknum;
542
543         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
544
545         /* Wait for up to 1 millisecond to acquire lock. */
546         for (i = 0; i < 100; i++) {
547                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
548                 if (status == APE_LOCK_GRANT_DRIVER)
549                         break;
550                 udelay(10);
551         }
552
553         if (status != APE_LOCK_GRANT_DRIVER) {
554                 /* Revoke the lock request. */
555                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
556                                 APE_LOCK_GRANT_DRIVER);
557
558                 ret = -EBUSY;
559         }
560
561         return ret;
562 }
563
564 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
565 {
566         int off;
567
568         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
569                 return;
570
571         switch (locknum) {
572                 case TG3_APE_LOCK_MEM:
573                         break;
574                 default:
575                         return;
576         }
577
578         off = 4 * locknum;
579         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
580 }
581
582 static void tg3_disable_ints(struct tg3 *tp)
583 {
584         tw32(TG3PCI_MISC_HOST_CTRL,
585              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
586         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
587 }
588
589 static inline void tg3_cond_int(struct tg3 *tp)
590 {
591         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
592             (tp->hw_status->status & SD_STATUS_UPDATED))
593                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
594         else
595                 tw32(HOSTCC_MODE, tp->coalesce_mode |
596                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
597 }
598
599 static void tg3_enable_ints(struct tg3 *tp)
600 {
601         tp->irq_sync = 0;
602         wmb();
603
604         tw32(TG3PCI_MISC_HOST_CTRL,
605              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
606         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
607                        (tp->last_tag << 24));
608         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
609                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
610                                (tp->last_tag << 24));
611         tg3_cond_int(tp);
612 }
613
614 static inline unsigned int tg3_has_work(struct tg3 *tp)
615 {
616         struct tg3_hw_status *sblk = tp->hw_status;
617         unsigned int work_exists = 0;
618
619         /* check for phy events */
620         if (!(tp->tg3_flags &
621               (TG3_FLAG_USE_LINKCHG_REG |
622                TG3_FLAG_POLL_SERDES))) {
623                 if (sblk->status & SD_STATUS_LINK_CHG)
624                         work_exists = 1;
625         }
626         /* check for RX/TX work to do */
627         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
628             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
629                 work_exists = 1;
630
631         return work_exists;
632 }
633
634 /* tg3_restart_ints
635  *  similar to tg3_enable_ints, but it accurately determines whether there
636  *  is new work pending and can return without flushing the PIO write
637  *  which reenables interrupts
638  */
639 static void tg3_restart_ints(struct tg3 *tp)
640 {
641         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
642                      tp->last_tag << 24);
643         mmiowb();
644
645         /* When doing tagged status, this work check is unnecessary.
646          * The last_tag we write above tells the chip which piece of
647          * work we've completed.
648          */
649         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
650             tg3_has_work(tp))
651                 tw32(HOSTCC_MODE, tp->coalesce_mode |
652                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
653 }
654
/* Quiesce the interface: stop NAPI polling and disable the TX queue.
 * trans_start is refreshed first so the stack does not declare a TX
 * timeout while the device is deliberately stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        napi_disable(&tp->napi);
        netif_tx_disable(tp->dev);
}
661
/* Resume the interface after tg3_netif_stop(): wake the TX queue,
 * re-enable NAPI, and re-enable interrupts.  Setting SD_STATUS_UPDATED
 * first makes tg3_cond_int() (via tg3_enable_ints) raise an interrupt
 * so events that arrived while stopped are processed promptly.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        napi_enable(&tp->napi);
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}
673
/* Rewrite TG3PCI_CLOCK_CTRL, keeping only the CLKRUN control bits and
 * the low 5 bits of the current value.  5705+ parts re-assert the 625
 * core clock if it was set; older parts step through a 44MHZ+ALTCLK /
 * ALTCLK sequence first.  Chips with a CPMU or in the 5780 class are
 * skipped.  NOTE(review): the intermediate writes and 40 us waits look
 * like a required hardware switching sequence -- do not reorder.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
                return;

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        /* Cache the reduced value in the softc. */
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                /* Two-step transition: 44 MHz + ALTCLK, then ALTCLK only. */
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
705
#define PHY_BUSY_LOOPS  5000    /* MI poll bound: 5000 x 10 us = ~50 ms */
707
708 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
709 {
710         u32 frame_val;
711         unsigned int loops;
712         int ret;
713
714         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
715                 tw32_f(MAC_MI_MODE,
716                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
717                 udelay(80);
718         }
719
720         *val = 0x0;
721
722         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
723                       MI_COM_PHY_ADDR_MASK);
724         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
725                       MI_COM_REG_ADDR_MASK);
726         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
727
728         tw32_f(MAC_MI_COM, frame_val);
729
730         loops = PHY_BUSY_LOOPS;
731         while (loops != 0) {
732                 udelay(10);
733                 frame_val = tr32(MAC_MI_COM);
734
735                 if ((frame_val & MI_COM_BUSY) == 0) {
736                         udelay(5);
737                         frame_val = tr32(MAC_MI_COM);
738                         break;
739                 }
740                 loops -= 1;
741         }
742
743         ret = -EBUSY;
744         if (loops != 0) {
745                 *val = frame_val & MI_COM_DATA_MASK;
746                 ret = 0;
747         }
748
749         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
750                 tw32_f(MAC_MI_MODE, tp->mi_mode);
751                 udelay(80);
752         }
753
754         return ret;
755 }
756
757 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
758 {
759         u32 frame_val;
760         unsigned int loops;
761         int ret;
762
763         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
764             (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
765                 return 0;
766
767         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
768                 tw32_f(MAC_MI_MODE,
769                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
770                 udelay(80);
771         }
772
773         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
774                       MI_COM_PHY_ADDR_MASK);
775         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
776                       MI_COM_REG_ADDR_MASK);
777         frame_val |= (val & MI_COM_DATA_MASK);
778         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
779
780         tw32_f(MAC_MI_COM, frame_val);
781
782         loops = PHY_BUSY_LOOPS;
783         while (loops != 0) {
784                 udelay(10);
785                 frame_val = tr32(MAC_MI_COM);
786                 if ((frame_val & MI_COM_BUSY) == 0) {
787                         udelay(5);
788                         frame_val = tr32(MAC_MI_COM);
789                         break;
790                 }
791                 loops -= 1;
792         }
793
794         ret = -EBUSY;
795         if (loops != 0)
796                 ret = 0;
797
798         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
799                 tw32_f(MAC_MI_MODE, tp->mi_mode);
800                 udelay(80);
801         }
802
803         return ret;
804 }
805
/* Enable or disable automatic MDI crossover on copper PHYs.  Serdes
 * interfaces and pre-5705 chips are left untouched.  The 5906 reaches
 * the control bit through an EPHY shadow register; other PHYs use the
 * shadowed AUXCTL misc register (select with RDSEL, write back with
 * WREN set).
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
        u32 phy;

        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
            (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
                return;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 ephy;

                /* Open the EPHY shadow, flip the MDIX bit, then restore. */
                if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
                        tg3_writephy(tp, MII_TG3_EPHY_TEST,
                                     ephy | MII_TG3_EPHY_SHADOW_EN);
                        if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
                                if (enable)
                                        phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                else
                                        phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
                        }
                        tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
                }
        } else {
                /* Read-modify-write of the AUXCTL misc shadow register. */
                phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
                      MII_TG3_AUXCTL_SHDWSEL_MISC;
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
                        if (enable)
                                phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        else
                                phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        phy |= MII_TG3_AUXCTL_MISC_WREN;
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
                }
        }
}
843
844 static void tg3_phy_set_wirespeed(struct tg3 *tp)
845 {
846         u32 val;
847
848         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
849                 return;
850
851         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
852             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
853                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
854                              (val | (1 << 15) | (1 << 4)));
855 }
856
857 static int tg3_bmcr_reset(struct tg3 *tp)
858 {
859         u32 phy_control;
860         int limit, err;
861
862         /* OK, reset it, and poll the BMCR_RESET bit until it
863          * clears or we time out.
864          */
865         phy_control = BMCR_RESET;
866         err = tg3_writephy(tp, MII_BMCR, phy_control);
867         if (err != 0)
868                 return -EBUSY;
869
870         limit = 5000;
871         while (limit--) {
872                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
873                 if (err != 0)
874                         return -EBUSY;
875
876                 if ((phy_control & BMCR_RESET) == 0) {
877                         udelay(40);
878                         break;
879                 }
880                 udelay(10);
881         }
882         if (limit <= 0)
883                 return -EBUSY;
884
885         return 0;
886 }
887
888 static int tg3_wait_macro_done(struct tg3 *tp)
889 {
890         int limit = 100;
891
892         while (limit--) {
893                 u32 tmp32;
894
895                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
896                         if ((tmp32 & 0x1000) == 0)
897                                 break;
898                 }
899         }
900         if (limit <= 0)
901                 return -EBUSY;
902
903         return 0;
904 }
905
/* Write a known test pattern into each of the four PHY DSP channels,
 * latch it, read it back, and verify it.
 *
 * On a macro-done timeout *resetp is set so the caller will retry
 * after a fresh PHY reset.  On a data mismatch a DSP recovery value
 * is written instead (and *resetp is left alone).  Returns 0 only
 * when every channel verifies, -EBUSY otherwise.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Six words per channel; on readback the even words are masked
	 * with 0x7fff and the odd words with 0x000f before comparing.
	 */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Address this channel's DSP block (0x2000 apart). */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Kick the macro (0x0202) and wait for it to finish;
		 * a timeout here means the PHY needs another reset.
		 */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-address the channel and issue the 0x0082 and
		 * 0x0802 macro commands before reading back.
		 */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back low/high word pairs and compare against
		 * the pattern that was written.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: write the DSP recovery
				 * values and give up on this pass.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
971
972 static int tg3_phy_reset_chanpat(struct tg3 *tp)
973 {
974         int chan;
975
976         for (chan = 0; chan < 4; chan++) {
977                 int i;
978
979                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
980                              (chan * 0x2000) | 0x0200);
981                 tg3_writephy(tp, 0x16, 0x0002);
982                 for (i = 0; i < 6; i++)
983                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
984                 tg3_writephy(tp, 0x16, 0x0202);
985                 if (tg3_wait_macro_done(tp))
986                         return -EBUSY;
987         }
988
989         return 0;
990 }
991
/* PHY reset workaround sequence for 5703/5704/5705: reset the PHY,
 * force 1000Mbps full-duplex master mode, and retry a DSP test
 * pattern (up to 10 attempts) until it verifies; then clear the
 * patterns and restore the original PHY configuration.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		/* Only re-reset the PHY when the previous test-pattern
		 * pass asked for it via do_phy_reset.
		 */
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode, saving the original register 9
		 * value for restoration below.
		 * NOTE(review): if this read fails on every retry,
		 * phy9_orig is used uninitialized after the loop —
		 * confirm whether that path matters in practice.
		 */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* Zero the test patterns back out of the DSP channels. */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the register 9 value saved before forcing master. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt (clear the 0x3000 bits
	 * set above); failing that, report -EBUSY unless a prior
	 * error is already pending.
	 */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
1067
1068 static void tg3_link_report(struct tg3 *);
1069
/* Reset the tigon3 PHY and apply all chip-specific post-reset
 * DSP and AUX-control fixups.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	/* 5906: take the EPHY out of IDDQ (low-power) mode before
	 * talking to it.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Read BMSR twice; the second read reflects current state
	 * (link bits are latched per the MII convention).
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* Resetting the PHY drops the link, so report it down now. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the heavyweight test-pattern reset
	 * workaround instead of a plain BMCR reset.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* Chip-bug DSP fixups, re-applied after every reset. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* Written twice intentionally. */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	/* Re-enable auto-MDIX and wirespeed after the reset. */
	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1176
/* Program the GRC local-control GPIOs that manage auxiliary (Vaux)
 * power, taking the WOL/ASF needs of both this port and (on
 * dual-port 5704/5714 devices) its peer into account.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* Only real NICs (not LOM variants) have these GPIOs wired. */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	/* On dual-port chips, consult the other function's flags too. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	/* If either port needs WOL or ASF, aux power must stay up. */
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Let the peer drive the GPIOs if it has
			 * already finished initialization.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			/* Three-step GPIO sequence, waiting after each
			 * write.
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Neither port needs aux power: switch it off. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1272
1273 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1274 {
1275         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1276                 return 1;
1277         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1278                 if (speed != SPEED_10)
1279                         return 1;
1280         } else if (speed == SPEED_10)
1281                 return 1;
1282
1283         return 0;
1284 }
1285
1286 static int tg3_setup_phy(struct tg3 *, int);
1287
1288 #define RESET_KIND_SHUTDOWN     0
1289 #define RESET_KIND_INIT         1
1290 #define RESET_KIND_SUSPEND      2
1291
1292 static void tg3_write_sig_post_reset(struct tg3 *, int);
1293 static int tg3_halt_cpu(struct tg3 *, u32);
1294 static int tg3_nvram_lock(struct tg3 *);
1295 static void tg3_nvram_unlock(struct tg3 *);
1296
/* Put the PHY into a low-power state ahead of device power-down.
 * Serdes and 5906 parts use dedicated register sequences; certain
 * chips must never get BMCR_PDOWN due to hardware bugs.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			/* Hold the serdes in soft reset with hardware
			 * autoneg selected.
			 */
			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* 5906: reset the EPHY and drop it into IDDQ mode. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else {
		/* Force the LEDs off and write the aux-control
		 * power-down setup value.
		 */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;
	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
1336
/* Transition the device to PCI power state @state, arranging
 * Wake-on-LAN, PHY power-down, clock gating, and aux power first.
 * D0 is handled with an early return; D1/D2/D3hot fall through to
 * the full shutdown sequence.  Returns 0 on success, -EINVAL for an
 * unrecognized state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;	/* config-space offset of the PM capability */

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Build the new PM control word: clear pending PME status and
	 * the old power-state bits.
	 */
	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		/* Full power: program D0 and return immediately. */
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is a NIC */
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask PCI interrupts while the chip is being shut down. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Save the current link settings so resume can restore them. */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	/* Drop copper links to autoneg 10/half for low-power mode. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		/* Without ASF, poll (up to 200 x 1ms) for the firmware
		 * status mailbox magic before continuing.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	/* Tell the firmware we are shutting down with WOL armed. */
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		/* Keep the MAC receive path alive enough to catch a
		 * magic packet.
		 */
		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = (tp->tg3_flags &
					     TG3_FLAG_WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		/* Enable magic packet detection only when PME from
		 * D3cold is supported.
		 */
		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Gate as many clocks as this chip generation allows. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Apply in two steps, waiting after each write. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Power the PHY fully down only when neither WOL, ASF, nor
	 * APE needs it.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		tg3_power_down_phy(tp);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU; unlock NVRAM only if the
			 * lock was actually taken.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}
1561
1562 static void tg3_link_report(struct tg3 *tp)
1563 {
1564         if (!netif_carrier_ok(tp->dev)) {
1565                 if (netif_msg_link(tp))
1566                         printk(KERN_INFO PFX "%s: Link is down.\n",
1567                                tp->dev->name);
1568         } else if (netif_msg_link(tp)) {
1569                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1570                        tp->dev->name,
1571                        (tp->link_config.active_speed == SPEED_1000 ?
1572                         1000 :
1573                         (tp->link_config.active_speed == SPEED_100 ?
1574                          100 : 10)),
1575                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1576                         "full" : "half"));
1577
1578                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1579                        "%s for RX.\n",
1580                        tp->dev->name,
1581                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1582                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1583         }
1584 }
1585
1586 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1587 {
1588         u32 new_tg3_flags = 0;
1589         u32 old_rx_mode = tp->rx_mode;
1590         u32 old_tx_mode = tp->tx_mode;
1591
1592         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1593
1594                 /* Convert 1000BaseX flow control bits to 1000BaseT
1595                  * bits before resolving flow control.
1596                  */
1597                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1598                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1599                                        ADVERTISE_PAUSE_ASYM);
1600                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1601
1602                         if (local_adv & ADVERTISE_1000XPAUSE)
1603                                 local_adv |= ADVERTISE_PAUSE_CAP;
1604                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1605                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1606                         if (remote_adv & LPA_1000XPAUSE)
1607                                 remote_adv |= LPA_PAUSE_CAP;
1608                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1609                                 remote_adv |= LPA_PAUSE_ASYM;
1610                 }
1611
1612                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1613                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1614                                 if (remote_adv & LPA_PAUSE_CAP)
1615                                         new_tg3_flags |=
1616                                                 (TG3_FLAG_RX_PAUSE |
1617                                                 TG3_FLAG_TX_PAUSE);
1618                                 else if (remote_adv & LPA_PAUSE_ASYM)
1619                                         new_tg3_flags |=
1620                                                 (TG3_FLAG_RX_PAUSE);
1621                         } else {
1622                                 if (remote_adv & LPA_PAUSE_CAP)
1623                                         new_tg3_flags |=
1624                                                 (TG3_FLAG_RX_PAUSE |
1625                                                 TG3_FLAG_TX_PAUSE);
1626                         }
1627                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1628                         if ((remote_adv & LPA_PAUSE_CAP) &&
1629                         (remote_adv & LPA_PAUSE_ASYM))
1630                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1631                 }
1632
1633                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1634                 tp->tg3_flags |= new_tg3_flags;
1635         } else {
1636                 new_tg3_flags = tp->tg3_flags;
1637         }
1638
1639         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1640                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1641         else
1642                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1643
1644         if (old_rx_mode != tp->rx_mode) {
1645                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1646         }
1647
1648         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1649                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1650         else
1651                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1652
1653         if (old_tx_mode != tp->tx_mode) {
1654                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1655         }
1656 }
1657
1658 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1659 {
1660         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1661         case MII_TG3_AUX_STAT_10HALF:
1662                 *speed = SPEED_10;
1663                 *duplex = DUPLEX_HALF;
1664                 break;
1665
1666         case MII_TG3_AUX_STAT_10FULL:
1667                 *speed = SPEED_10;
1668                 *duplex = DUPLEX_FULL;
1669                 break;
1670
1671         case MII_TG3_AUX_STAT_100HALF:
1672                 *speed = SPEED_100;
1673                 *duplex = DUPLEX_HALF;
1674                 break;
1675
1676         case MII_TG3_AUX_STAT_100FULL:
1677                 *speed = SPEED_100;
1678                 *duplex = DUPLEX_FULL;
1679                 break;
1680
1681         case MII_TG3_AUX_STAT_1000HALF:
1682                 *speed = SPEED_1000;
1683                 *duplex = DUPLEX_HALF;
1684                 break;
1685
1686         case MII_TG3_AUX_STAT_1000FULL:
1687                 *speed = SPEED_1000;
1688                 *duplex = DUPLEX_FULL;
1689                 break;
1690
1691         default:
1692                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1693                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1694                                  SPEED_10;
1695                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1696                                   DUPLEX_HALF;
1697                         break;
1698                 }
1699                 *speed = SPEED_INVALID;
1700                 *duplex = DUPLEX_INVALID;
1701                 break;
1702         };
1703 }
1704
/* Program the copper PHY's advertisement and control registers from
 * tp->link_config, then either force the requested speed/duplex or
 * (re)start autonegotiation.  The exact write ordering below is
 * deliberate; do not reorder the tg3_writephy() calls.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		/* Keep 100Mb advertised when WoL needs a 100Mb link. */
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific speed requested: advertise whatever the
		 * configured advertising mask allows.
		 */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 must link as master (chip-specific
			 * workaround; see matching check below).
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			/* 10/100 forced: clear gigabit control first. */
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Take the link down via loopback and wait (up to
			 * ~15ms) for it to actually drop before writing
			 * the new forced mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* Read BMSR twice: link status is latched. */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autoneg path: enable and restart negotiation. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1837
1838 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1839 {
1840         int err;
1841
1842         /* Turn off tap power management. */
1843         /* Set Extended packet length bit */
1844         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1845
1846         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1847         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1848
1849         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1850         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1851
1852         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1853         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1854
1855         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1856         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1857
1858         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1859         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1860
1861         udelay(40);
1862
1863         return err;
1864 }
1865
1866 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1867 {
1868         u32 adv_reg, all_mask = 0;
1869
1870         if (mask & ADVERTISED_10baseT_Half)
1871                 all_mask |= ADVERTISE_10HALF;
1872         if (mask & ADVERTISED_10baseT_Full)
1873                 all_mask |= ADVERTISE_10FULL;
1874         if (mask & ADVERTISED_100baseT_Half)
1875                 all_mask |= ADVERTISE_100HALF;
1876         if (mask & ADVERTISED_100baseT_Full)
1877                 all_mask |= ADVERTISE_100FULL;
1878
1879         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1880                 return 0;
1881
1882         if ((adv_reg & all_mask) != all_mask)
1883                 return 0;
1884         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1885                 u32 tg3_ctrl;
1886
1887                 all_mask = 0;
1888                 if (mask & ADVERTISED_1000baseT_Half)
1889                         all_mask |= ADVERTISE_1000HALF;
1890                 if (mask & ADVERTISED_1000baseT_Full)
1891                         all_mask |= ADVERTISE_1000FULL;
1892
1893                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1894                         return 0;
1895
1896                 if ((tg3_ctrl & all_mask) != all_mask)
1897                         return 0;
1898         }
1899         return 1;
1900 }
1901
/* Bring up / re-evaluate the link on a copper PHY: ack pending MAC
 * status, apply chip- and PHY-specific workarounds, poll link state,
 * resolve speed/duplex and flow control, program MAC_MODE to match,
 * and propagate carrier changes to the network stack.  Always
 * returns 0 except when the 5401 DSP reload fails.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Ack any latched MAC status change indications. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* Read BMSR twice: link status is latched. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link down: reload the 5401 DSP sequence, then
			 * poll up to ~10ms for link to return.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 still down after a gigabit link: full
			 * PHY reset plus another DSP reload.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	/* Unmask only link-change interrupts when using MI interrupts;
	 * otherwise mask everything (except on 5906).
	 */
	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Select the PHY LED mode on 5700/5701. */
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* NOTE(review): sets bit 10 in AUX_CTRL shadow 0x4007 as a
		 * capacitive-coupling workaround; if it was not already set,
		 * set it and redo link bring-up — confirm against PHY docs.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll up to ~4ms for link (double BMSR read: latched status). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait (up to ~20ms) for a non-zero AUX_STAT, then decode
		 * the negotiated speed/duplex from it.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a plausible BMCR value (non-zero, not 0x7fff). */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: link only counts if autoneg is off
			 * and the PHY matches the requested speed/duplex.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* (Re)program advertisements / restart autoneg, then check
		 * whether link came back immediately.
		 */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC port mode to match the resolved link. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		/* 5700 at gigabit on a fast bus: ack status changes and
		 * write the magic value to the firmware mailbox.
		 */
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate carrier state changes to the network stack and log. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2178
/* Software state for the fiber autonegotiation state machine driven
 * by tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
	int state;	/* one of the ANEG_STATE_* values below */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;	/* MR_* control and status bits below */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* cur_time is incremented on every state-machine invocation;
	 * link_time records when the current state was entered, and the
	 * difference is compared against ANEG_STATE_SETTLE_TIME.
	 */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last rx config word being matched */
	int ability_match_count;	/* consecutive identical rx configs seen */

	/* Latches set from the received config word each invocation. */
	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* raw tx/rx config words (ANEG_CFG_* bits) */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes from tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0	/* step completed, keep running */
#define ANEG_DONE       1	/* negotiation finished */
#define ANEG_TIMER_ENAB 2	/* caller should keep the settle timer armed */
#define ANEG_FAILED     -1

#define ANEG_STATE_SETTLE_TIME  10000
2242
2243 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2244                                    struct tg3_fiber_aneginfo *ap)
2245 {
2246         unsigned long delta;
2247         u32 rx_cfg_reg;
2248         int ret;
2249
2250         if (ap->state == ANEG_STATE_UNKNOWN) {
2251                 ap->rxconfig = 0;
2252                 ap->link_time = 0;
2253                 ap->cur_time = 0;
2254                 ap->ability_match_cfg = 0;
2255                 ap->ability_match_count = 0;
2256                 ap->ability_match = 0;
2257                 ap->idle_match = 0;
2258                 ap->ack_match = 0;
2259         }
2260         ap->cur_time++;
2261
2262         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2263                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2264
2265                 if (rx_cfg_reg != ap->ability_match_cfg) {
2266                         ap->ability_match_cfg = rx_cfg_reg;
2267                         ap->ability_match = 0;
2268                         ap->ability_match_count = 0;
2269                 } else {
2270                         if (++ap->ability_match_count > 1) {
2271                                 ap->ability_match = 1;
2272                                 ap->ability_match_cfg = rx_cfg_reg;
2273                         }
2274                 }
2275                 if (rx_cfg_reg & ANEG_CFG_ACK)
2276                         ap->ack_match = 1;
2277                 else
2278                         ap->ack_match = 0;
2279
2280                 ap->idle_match = 0;
2281         } else {
2282                 ap->idle_match = 1;
2283                 ap->ability_match_cfg = 0;
2284                 ap->ability_match_count = 0;
2285                 ap->ability_match = 0;
2286                 ap->ack_match = 0;
2287
2288                 rx_cfg_reg = 0;
2289         }
2290
2291         ap->rxconfig = rx_cfg_reg;
2292         ret = ANEG_OK;
2293
2294         switch(ap->state) {
2295         case ANEG_STATE_UNKNOWN:
2296                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2297                         ap->state = ANEG_STATE_AN_ENABLE;
2298
2299                 /* fallthru */
2300         case ANEG_STATE_AN_ENABLE:
2301                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2302                 if (ap->flags & MR_AN_ENABLE) {
2303                         ap->link_time = 0;
2304                         ap->cur_time = 0;
2305                         ap->ability_match_cfg = 0;
2306                         ap->ability_match_count = 0;
2307                         ap->ability_match = 0;
2308                         ap->idle_match = 0;
2309                         ap->ack_match = 0;
2310
2311                         ap->state = ANEG_STATE_RESTART_INIT;
2312                 } else {
2313                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2314                 }
2315                 break;
2316
2317         case ANEG_STATE_RESTART_INIT:
2318                 ap->link_time = ap->cur_time;
2319                 ap->flags &= ~(MR_NP_LOADED);
2320                 ap->txconfig = 0;
2321                 tw32(MAC_TX_AUTO_NEG, 0);
2322                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2323                 tw32_f(MAC_MODE, tp->mac_mode);
2324                 udelay(40);
2325
2326                 ret = ANEG_TIMER_ENAB;
2327                 ap->state = ANEG_STATE_RESTART;
2328
2329                 /* fallthru */
2330         case ANEG_STATE_RESTART:
2331                 delta = ap->cur_time - ap->link_time;
2332                 if (delta > ANEG_STATE_SETTLE_TIME) {
2333                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2334                 } else {
2335                         ret = ANEG_TIMER_ENAB;
2336                 }
2337                 break;
2338
2339         case ANEG_STATE_DISABLE_LINK_OK:
2340                 ret = ANEG_DONE;
2341                 break;
2342
2343         case ANEG_STATE_ABILITY_DETECT_INIT:
2344                 ap->flags &= ~(MR_TOGGLE_TX);
2345                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2346                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2347                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2348                 tw32_f(MAC_MODE, tp->mac_mode);
2349                 udelay(40);
2350
2351                 ap->state = ANEG_STATE_ABILITY_DETECT;
2352                 break;
2353
2354         case ANEG_STATE_ABILITY_DETECT:
2355                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2356                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2357                 }
2358                 break;
2359
2360         case ANEG_STATE_ACK_DETECT_INIT:
2361                 ap->txconfig |= ANEG_CFG_ACK;
2362                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2363                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2364                 tw32_f(MAC_MODE, tp->mac_mode);
2365                 udelay(40);
2366
2367                 ap->state = ANEG_STATE_ACK_DETECT;
2368
2369                 /* fallthru */
2370         case ANEG_STATE_ACK_DETECT:
2371                 if (ap->ack_match != 0) {
2372                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2373                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2374                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2375                         } else {
2376                                 ap->state = ANEG_STATE_AN_ENABLE;
2377                         }
2378                 } else if (ap->ability_match != 0 &&
2379                            ap->rxconfig == 0) {
2380                         ap->state = ANEG_STATE_AN_ENABLE;
2381                 }
2382                 break;
2383
2384         case ANEG_STATE_COMPLETE_ACK_INIT:
2385                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2386                         ret = ANEG_FAILED;
2387                         break;
2388                 }
2389                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2390                                MR_LP_ADV_HALF_DUPLEX |
2391                                MR_LP_ADV_SYM_PAUSE |
2392                                MR_LP_ADV_ASYM_PAUSE |
2393                                MR_LP_ADV_REMOTE_FAULT1 |
2394                                MR_LP_ADV_REMOTE_FAULT2 |
2395                                MR_LP_ADV_NEXT_PAGE |
2396                                MR_TOGGLE_RX |
2397                                MR_NP_RX);
2398                 if (ap->rxconfig & ANEG_CFG_FD)
2399                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2400                 if (ap->rxconfig & ANEG_CFG_HD)
2401                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2402                 if (ap->rxconfig & ANEG_CFG_PS1)
2403                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2404                 if (ap->rxconfig & ANEG_CFG_PS2)
2405                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2406                 if (ap->rxconfig & ANEG_CFG_RF1)
2407                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2408                 if (ap->rxconfig & ANEG_CFG_RF2)
2409                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2410                 if (ap->rxconfig & ANEG_CFG_NP)
2411                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2412
2413                 ap->link_time = ap->cur_time;
2414
2415                 ap->flags ^= (MR_TOGGLE_TX);
2416                 if (ap->rxconfig & 0x0008)
2417                         ap->flags |= MR_TOGGLE_RX;
2418                 if (ap->rxconfig & ANEG_CFG_NP)
2419                         ap->flags |= MR_NP_RX;
2420                 ap->flags |= MR_PAGE_RX;
2421
2422                 ap->state = ANEG_STATE_COMPLETE_ACK;
2423                 ret = ANEG_TIMER_ENAB;
2424                 break;
2425
2426         case ANEG_STATE_COMPLETE_ACK:
2427                 if (ap->ability_match != 0 &&
2428                     ap->rxconfig == 0) {
2429                         ap->state = ANEG_STATE_AN_ENABLE;
2430                         break;
2431                 }
2432                 delta = ap->cur_time - ap->link_time;
2433                 if (delta > ANEG_STATE_SETTLE_TIME) {
2434                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2435                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2436                         } else {
2437                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2438                                     !(ap->flags & MR_NP_RX)) {
2439                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2440                                 } else {
2441                                         ret = ANEG_FAILED;
2442                                 }
2443                         }
2444                 }
2445                 break;
2446
2447         case ANEG_STATE_IDLE_DETECT_INIT:
2448                 ap->link_time = ap->cur_time;
2449                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2450                 tw32_f(MAC_MODE, tp->mac_mode);
2451                 udelay(40);
2452
2453                 ap->state = ANEG_STATE_IDLE_DETECT;
2454                 ret = ANEG_TIMER_ENAB;
2455                 break;
2456
2457         case ANEG_STATE_IDLE_DETECT:
2458                 if (ap->ability_match != 0 &&
2459                     ap->rxconfig == 0) {
2460                         ap->state = ANEG_STATE_AN_ENABLE;
2461                         break;
2462                 }
2463                 delta = ap->cur_time - ap->link_time;
2464                 if (delta > ANEG_STATE_SETTLE_TIME) {
2465                         /* XXX another gem from the Broadcom driver :( */
2466                         ap->state = ANEG_STATE_LINK_OK;
2467                 }
2468                 break;
2469
2470         case ANEG_STATE_LINK_OK:
2471                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2472                 ret = ANEG_DONE;
2473                 break;
2474
2475         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2476                 /* ??? unimplemented */
2477                 break;
2478
2479         case ANEG_STATE_NEXT_PAGE_WAIT:
2480                 /* ??? unimplemented */
2481                 break;
2482
2483         default:
2484                 ret = ANEG_FAILED;
2485                 break;
2486         };
2487
2488         return ret;
2489 }
2490
2491 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2492 {
2493         int res = 0;
2494         struct tg3_fiber_aneginfo aninfo;
2495         int status = ANEG_FAILED;
2496         unsigned int tick;
2497         u32 tmp;
2498
2499         tw32_f(MAC_TX_AUTO_NEG, 0);
2500
2501         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2502         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2503         udelay(40);
2504
2505         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2506         udelay(40);
2507
2508         memset(&aninfo, 0, sizeof(aninfo));
2509         aninfo.flags |= MR_AN_ENABLE;
2510         aninfo.state = ANEG_STATE_UNKNOWN;
2511         aninfo.cur_time = 0;
2512         tick = 0;
2513         while (++tick < 195000) {
2514                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2515                 if (status == ANEG_DONE || status == ANEG_FAILED)
2516                         break;
2517
2518                 udelay(1);
2519         }
2520
2521         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2522         tw32_f(MAC_MODE, tp->mac_mode);
2523         udelay(40);
2524
2525         *flags = aninfo.flags;
2526
2527         if (status == ANEG_DONE &&
2528             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2529                              MR_LP_ADV_FULL_DUPLEX)))
2530                 res = 1;
2531
2532         return res;
2533 }
2534
/* Bring the BCM8002 SerDes PHY into a usable state.
 *
 * Performs a fixed, order-sensitive Broadcom-supplied init sequence:
 * set the PLL lock range, soft-reset the PHY, select the PMA/Ch 1
 * config registers, enable auto-lock/comdet, toggle POR, then busy-wait
 * for the signal to stabilize.  The vendor register numbers (0x10,
 * 0x11, 0x13, 0x16, 0x18) and their values are opaque magic; only the
 * intents noted inline are known.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)	/* ~5 ms busy-wait */
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)	/* ~150 ms busy-wait */
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2584
/* Fiber link setup using the SG_DIG hardware autoneg engine
 * (5704S-class parts).
 *
 * Programs SG_DIG_CTRL according to tp->link_config.autoneg, applies
 * the MAC_SERDES_CFG workaround on all revisions except 5704 A0/A1,
 * and falls back to parallel detection when the link partner does not
 * complete negotiation before the serdes timeout counter expires.
 *
 * Returns nonzero when the link should be considered up.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* Every revision except 5704 A0/A1 needs the SERDES_CFG tweak. */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Autoneg disabled: if bit 31 (hw autoneg enable,
		 * presumably — TODO confirm against tg3.h) is still set,
		 * reprogram SG_DIG_CTRL to the forced-mode value.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		/* Forced mode: link is up as soon as PCS is synced. */
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymettric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* If we came up via parallel detection and still see PCS
		 * sync with no incoming config words, keep the link up
		 * while the serdes counter runs down instead of
		 * restarting autoneg immediately.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse bit 30 to (re)start hardware autoneg. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		/* Bit 1 of SG_DIG_STATUS: autoneg complete (inferred from
		 * usage here — TODO confirm against tg3.h bit names).
		 */
		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			/* Bits 19/20 map to the partner's pause ability. */
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg not complete: let the timeout counter
			 * expire before attempting parallel detection.
			 */
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				/* Drop back to forced mode and re-check. */
				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
2721
2722 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2723 {
2724         int current_link_up = 0;
2725
2726         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
2727                 goto out;
2728
2729         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2730                 u32 flags;
2731                 int i;
2732
2733                 if (fiber_autoneg(tp, &flags)) {
2734                         u32 local_adv, remote_adv;
2735
2736                         local_adv = ADVERTISE_PAUSE_CAP;
2737                         remote_adv = 0;
2738                         if (flags & MR_LP_ADV_SYM_PAUSE)
2739                                 remote_adv |= LPA_PAUSE_CAP;
2740                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2741                                 remote_adv |= LPA_PAUSE_ASYM;
2742
2743                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2744
2745                         current_link_up = 1;
2746                 }
2747                 for (i = 0; i < 30; i++) {
2748                         udelay(20);
2749                         tw32_f(MAC_STATUS,
2750                                (MAC_STATUS_SYNC_CHANGED |
2751                                 MAC_STATUS_CFG_CHANGED));
2752                         udelay(40);
2753                         if ((tr32(MAC_STATUS) &
2754                              (MAC_STATUS_SYNC_CHANGED |
2755                               MAC_STATUS_CFG_CHANGED)) == 0)
2756                                 break;
2757                 }
2758
2759                 mac_status = tr32(MAC_STATUS);
2760                 if (current_link_up == 0 &&
2761                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2762                     !(mac_status & MAC_STATUS_RCVD_CFG))
2763                         current_link_up = 1;
2764         } else {
2765                 /* Forcing 1000FD link up. */
2766                 current_link_up = 1;
2767
2768                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2769                 udelay(40);
2770
2771                 tw32_f(MAC_MODE, tp->mac_mode);
2772                 udelay(40);
2773         }
2774
2775 out:
2776         return current_link_up;
2777 }
2778
/* Top-level link setup for TBI/fiber ports.
 *
 * Chooses between the SG_DIG hardware autoneg path and the software
 * state machine, re-syncs MAC status, programs the link LED, and turns
 * carrier on/off with a tg3_link_report() when anything changed.
 * @force_reset is unused in this variant.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot current settings so only real changes get reported. */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, carrier already up, init done,
	 * and status shows sync + signal with no pending config/change
	 * events — just ack the change latches and keep current state.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the MAC into TBI port mode for the fiber interface. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the link-change bit in the shared status block so the
	 * next interrupt does not immediately re-enter link setup.
	 */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack sync/config/link-state change latches until quiet
	 * (bounded at 100 tries).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Nudge the partner by pulsing SEND_CONFIGS. */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		/* Fiber link is reported as 1000 Mb/s full duplex. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		/* Carrier unchanged; still report if the pause config,
		 * speed, or duplex moved underneath us.
		 */
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2890
/* Link setup for fiber ports driven through an MII-style (1000Base-X)
 * management interface.
 *
 * Programs the 1000X advertisement and BMCR according to
 * tp->link_config, reads back link state (with a 5714-specific
 * MAC_TX_STATUS override), resolves duplex and flow control from the
 * negotiated abilities, and updates carrier state.
 *
 * Returns the OR of tg3_readphy() return codes accumulated along the
 * way (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any pending MAC status change latches. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* Read BMSR twice: link status is a latched bit in MII, so the
	 * second read reflects the current state.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* 5714: trust the MAC's TX status link bit over BMSR. */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* (Re)start autoneg when the advertisement changed or
		 * autoneg was not yet enabled, then return early and let
		 * the serdes timer poll for completion.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: disable autoneg, set duplex by hand. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Clear our 1000X advertisement and
				 * restart autoneg so the partner drops
				 * the link before we force the new mode.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Double read: BMSR link status is latched. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			/* Resolve duplex/flow control from the abilities
			 * both ends advertised.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				current_link_up = 0;
		}
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	/* NOTE(review): this tests the *old* active_duplex; current_duplex
	 * is only copied into link_config a few lines below.  Probably
	 * benign (these fiber links are effectively always full duplex),
	 * but confirm whether current_duplex was intended here.
	 */
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
3057
/* Detect link on a serdes device by "parallel detection" when autoneg
 * has had time to run (serdes_counter reached zero) but no carrier came
 * up, and fall back to autoneg once the link partner starts sending
 * config code words again.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Read twice - presumably the first read flushes
			 * latched status; confirm against PHY datasheet.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				/* Force 1000/full with autoneg disabled. */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3115
3116 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3117 {
3118         int err;
3119
3120         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3121                 err = tg3_setup_fiber_phy(tp, force_reset);
3122         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3123                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3124         } else {
3125                 err = tg3_setup_copper_phy(tp, force_reset);
3126         }
3127
3128         if (tp->link_config.active_speed == SPEED_1000 &&
3129             tp->link_config.active_duplex == DUPLEX_HALF)
3130                 tw32(MAC_TX_LENGTHS,
3131                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3132                       (6 << TX_LENGTHS_IPG_SHIFT) |
3133                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3134         else
3135                 tw32(MAC_TX_LENGTHS,
3136                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3137                       (6 << TX_LENGTHS_IPG_SHIFT) |
3138                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3139
3140         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3141                 if (netif_carrier_ok(tp->dev)) {
3142                         tw32(HOSTCC_STAT_COAL_TICKS,
3143                              tp->coal.stats_block_coalesce_usecs);
3144                 } else {
3145                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3146                 }
3147         }
3148
3149         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3150                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3151                 if (!netif_carrier_ok(tp->dev))
3152                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3153                               tp->pwrmgmt_thresh;
3154                 else
3155                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3156                 tw32(PCIE_PWR_MGMT_THRESH, val);
3157         }
3158
3159         return err;
3160 }
3161
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue (see tg3_reset_task).
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* Recovery only makes sense if the re-order workaround was not
	 * already active.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag is consumed by tg3_reset_task when the workqueue runs. */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3182
/* Number of free TX ring descriptors.  The smp_mb() pairs with the
 * barrier in tg3_tx() so a fresh tx_cons is observed (see the comment
 * above the barrier there).
 */
static inline u32 tg3_tx_avail(struct tg3 *tp)
{
	smp_mb();
	return (tp->tx_pending -
		((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
}
3189
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 *
 * Reclaims completed TX descriptors up to the chip's consumer index,
 * unmapping DMA buffers and freeing skbs, then wakes the queue if
 * enough space has opened up.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A NULL skb means our bookkeeping and the chip's
		 * consumer index disagree - request a chip reset.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* First descriptor covers the linear part of the skb. */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* One additional descriptor per page fragment. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to avoid racing with a concurrent
	 * tg3_start_xmit() that stops the queue after our first test.
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3257
3258 /* Returns size of skb allocated or < 0 on error.
3259  *
3260  * We only need to fill in the address because the other members
3261  * of the RX descriptor are invariant, see tg3_init_rings.
3262  *
3263  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3264  * posting buffers we only dirty the first cache line of the RX
3265  * descriptor (containing the address).  Whereas for the RX status
3266  * buffers the cpu only reads the last cacheline of the RX descriptor
3267  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3268  */
3269 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3270                             int src_idx, u32 dest_idx_unmasked)
3271 {
3272         struct tg3_rx_buffer_desc *desc;
3273         struct ring_info *map, *src_map;
3274         struct sk_buff *skb;
3275         dma_addr_t mapping;
3276         int skb_size, dest_idx;
3277
3278         src_map = NULL;
3279         switch (opaque_key) {
3280         case RXD_OPAQUE_RING_STD:
3281                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3282                 desc = &tp->rx_std[dest_idx];
3283                 map = &tp->rx_std_buffers[dest_idx];
3284                 if (src_idx >= 0)
3285                         src_map = &tp->rx_std_buffers[src_idx];
3286                 skb_size = tp->rx_pkt_buf_sz;
3287                 break;
3288
3289         case RXD_OPAQUE_RING_JUMBO:
3290                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3291                 desc = &tp->rx_jumbo[dest_idx];
3292                 map = &tp->rx_jumbo_buffers[dest_idx];
3293                 if (src_idx >= 0)
3294                         src_map = &tp->rx_jumbo_buffers[src_idx];
3295                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3296                 break;
3297
3298         default:
3299                 return -EINVAL;
3300         };
3301
3302         /* Do not overwrite any of the map or rp information
3303          * until we are sure we can commit to a new buffer.
3304          *
3305          * Callers depend upon this behavior and assume that
3306          * we leave everything unchanged if we fail.
3307          */
3308         skb = netdev_alloc_skb(tp->dev, skb_size);
3309         if (skb == NULL)
3310                 return -ENOMEM;
3311
3312         skb_reserve(skb, tp->rx_offset);
3313
3314         mapping = pci_map_single(tp->pdev, skb->data,
3315                                  skb_size - tp->rx_offset,
3316                                  PCI_DMA_FROMDEVICE);
3317
3318         map->skb = skb;
3319         pci_unmap_addr_set(map, mapping, mapping);
3320
3321         if (src_map != NULL)
3322                 src_map->skb = NULL;
3323
3324         desc->addr_hi = ((u64)mapping >> 32);
3325         desc->addr_lo = ((u64)mapping & 0xffffffff);
3326
3327         return skb_size;
3328 }
3329
3330 /* We only need to move over in the address because the other
3331  * members of the RX descriptor are invariant.  See notes above
3332  * tg3_alloc_rx_skb for full details.
3333  */
3334 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3335                            int src_idx, u32 dest_idx_unmasked)
3336 {
3337         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3338         struct ring_info *src_map, *dest_map;
3339         int dest_idx;
3340
3341         switch (opaque_key) {
3342         case RXD_OPAQUE_RING_STD:
3343                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3344                 dest_desc = &tp->rx_std[dest_idx];
3345                 dest_map = &tp->rx_std_buffers[dest_idx];
3346                 src_desc = &tp->rx_std[src_idx];
3347                 src_map = &tp->rx_std_buffers[src_idx];
3348                 break;
3349
3350         case RXD_OPAQUE_RING_JUMBO:
3351                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3352                 dest_desc = &tp->rx_jumbo[dest_idx];
3353                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3354                 src_desc = &tp->rx_jumbo[src_idx];
3355                 src_map = &tp->rx_jumbo_buffers[src_idx];
3356                 break;
3357
3358         default:
3359                 return;
3360         };
3361
3362         dest_map->skb = src_map->skb;
3363         pci_unmap_addr_set(dest_map, mapping,
3364                            pci_unmap_addr(src_map, mapping));
3365         dest_desc->addr_hi = src_desc->addr_hi;
3366         dest_desc->addr_lo = src_desc->addr_lo;
3367
3368         src_map->skb = NULL;
3369 }
3370
#if TG3_VLAN_TAG_USED
/* Deliver a VLAN-tagged frame to the stack via the hw-accel path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3377
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies which posting ring (and
		 * which slot in it) this status entry refers to.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		/* Drop errored frames (except the tolerated odd-nibble
		 * MII case), recycling the buffer back to the chip.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large packet: post a replacement buffer and
			 * hand the original skb up the stack.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy into a fresh skb and give
			 * the original buffer back to the chip.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			/* 2-byte reserve, presumably for IP header
			 * alignment (NET_IP_ALIGN-style).
			 */
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust hardware checksum only when it verified a
		 * TCP/UDP checksum to 0xffff.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically publish newly posted std buffers so the
		 * chip does not run dry during long bursts.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
3557
/* NAPI poll routine: handles link changes, TX completions, and up to
 * @budget RX packets, then re-enables interrupts when no work remains.
 * Returns the number of RX packets processed.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	struct net_device *netdev = tp->dev;
	struct tg3_hw_status *sblk = tp->hw_status;
	int work_done = 0;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit before re-evaluating
			 * the PHY, preserving SD_STATUS_UPDATED.
			 */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		/* tg3_tx() may have flagged a needed chip reset; stop
		 * polling and hand over to the reset task.
		 */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
			netif_rx_complete(netdev, napi);
			schedule_work(&tp->reset_task);
			return 0;
		}
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done = tg3_rx(tp, budget);

	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		tp->last_tag = sblk->status_tag;
		rmb();
	} else
		sblk->status &= ~SD_STATUS_UPDATED;

	/* if no more work, tell net stack and NIC we're done */
	if (!tg3_has_work(tp)) {
		netif_rx_complete(netdev, napi);
		tg3_restart_ints(tp);
	}

	return work_done;
}
3609
/* Block interrupt processing: set irq_sync (checked by the ISRs via
 * tg3_irq_sync) and wait for any handler already running to finish.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Publish irq_sync before waiting for in-flight handlers. */
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3619
/* Nonzero while interrupts are quiesced by tg3_irq_quiesce(). */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3624
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.  Pair with tg3_full_unlock().
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
3636
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3641
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the NAPI poll will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Skip scheduling while tg3_irq_quiesce() holds us off. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}
3658
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
3683
/* Legacy INTx interrupt handler (untagged status blocks). */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			/* Not our interrupt (shared line), or chip is
			 * mid-reset - report it unhandled.
			 */
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
3732
/* Legacy INTx interrupt handler for chips using tagged status blocks;
 * a repeated status_tag means no new status has been posted.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
3780
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	/* Claim the interrupt if the status block was updated or the
	 * chip reports INTA asserted; disable interrupts so the test
	 * observes exactly one.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
3795
/* Forward declarations used by tg3_restart_hw and tg3_reset_task. */
static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);
3798
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.  On failure the chip is halted and the
 * device is closed; the lock is dropped and re-acquired around the
 * close.  Returns the tg3_init_hw() error code.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* NOTE(review): the lock is dropped here, presumably
		 * because dev_close() cannot run under it - confirm.
		 */
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
3820
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' entry point - invokes the INTx handler directly
 * (used when normal interrupts are unavailable, e.g. netconsole).
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
3829
/* Workqueue handler (tp->reset_task) that performs a full chip reset
 * and reinitialization outside interrupt context; scheduled e.g. from
 * tg3_tx_timeout().  Takes and releases the full lock in stages
 * because tg3_netif_stop() must run unlocked.
 */
static void tg3_reset_task(struct work_struct *work)
{
        struct tg3 *tp = container_of(work, struct tg3, reset_task);
        unsigned int restart_timer;

        tg3_full_lock(tp, 0);

        /* Device was closed in the meantime; nothing to reset. */
        if (!netif_running(tp->dev)) {
                tg3_full_unlock(tp);
                return;
        }

        tg3_full_unlock(tp);

        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        /* Consume the one-shot "restart the timer" request. */
        restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
        tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

        /* TX recovery: switch mailbox writes to the flushing variants
         * and force ordered mailbox writes from here on.
         */
        if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
                tp->write32_tx_mbox = tg3_write32_tx_mbox;
                tp->write32_rx_mbox = tg3_write_flush_reg32;
                tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
                tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
        }

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        if (tg3_init_hw(tp, 1))
                goto out;

        tg3_netif_start(tp);

        if (restart_timer)
                mod_timer(&tp->timer, jiffies + 1);

out:
        tg3_full_unlock(tp);
}
3870
/* Log a short snapshot of the MAC and DMA engine status registers,
 * used to aid debugging when a TX timeout fires (see tg3_tx_timeout).
 */
static void tg3_dump_short_state(struct tg3 *tp)
{
        printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
               tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
        printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
               tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
3878
3879 static void tg3_tx_timeout(struct net_device *dev)
3880 {
3881         struct tg3 *tp = netdev_priv(dev);
3882
3883         if (netif_msg_tx_err(tp)) {
3884                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3885                        dev->name);
3886                 tg3_dump_short_state(tp);
3887         }
3888
3889         schedule_work(&tp->reset_task);
3890 }
3891
3892 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3893 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3894 {
3895         u32 base = (u32) mapping & 0xffffffff;
3896
3897         return ((base > 0xffffdcc0) &&
3898                 (base + len + 8 < base));
3899 }
3900
3901 /* Test for DMA addresses > 40-bit */
3902 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3903                                           int len)
3904 {
3905 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3906         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3907                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3908         return 0;
3909 #else
3910         return 0;
3911 #endif
3912 }
3913
3914 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3915
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * The caller has already filled TX descriptors [*start, last_plus_one)
 * for @skb, but one of the mapped buffers tripped a DMA erratum test.
 * Copy the skb into a freshly allocated linear skb, queue that single
 * buffer at *start instead, and unmap/clear every sw ring entry the
 * caller had filled for the original skb.  Returns 0 on success, -1
 * when the packet had to be dropped (copy failed, or the replacement
 * buffer itself crosses a 4G boundary); the ring entries are cleaned
 * up either way and the original skb is always freed.
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
                                       u32 last_plus_one, u32 *start,
                                       u32 base_flags, u32 mss)
{
        struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
        dma_addr_t new_addr = 0;
        u32 entry = *start;
        int i, ret = 0;

        if (!new_skb) {
                ret = -1;
        } else {
                /* New SKB is guaranteed to be linear. */
                /* NOTE(review): entry was already initialized to *start
                 * above; this re-assignment is redundant but harmless.
                 * pci_map_single() failure is not checked here — TODO
                 * confirm that is acceptable on this kernel's DMA API.
                 */
                entry = *start;
                new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
                                          PCI_DMA_TODEVICE);
                /* Make sure new skb does not cross any 4G boundaries.
                 * Drop the packet if it does.
                 */
                if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
                        ret = -1;
                        dev_kfree_skb(new_skb);
                        new_skb = NULL;
                } else {
                        /* One linear buffer, so this is both first and
                         * last fragment (is_end bit set).
                         */
                        tg3_set_txd(tp, entry, new_addr, new_skb->len,
                                    base_flags, 1 | (mss << 1));
                        *start = NEXT_TX(entry);
                }
        }

        /* Now clean up the sw ring entries. */
        i = 0;
        while (entry != last_plus_one) {
                int len;

                /* Entry 0 held the skb head; entries 1..n held frags. */
                if (i == 0)
                        len = skb_headlen(skb);
                else
                        len = skb_shinfo(skb)->frags[i-1].size;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
                                 len, PCI_DMA_TODEVICE);
                if (i == 0) {
                        /* First entry now owns the replacement skb (or
                         * NULL when the workaround failed above).
                         */
                        tp->tx_buffers[entry].skb = new_skb;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
                } else {
                        tp->tx_buffers[entry].skb = NULL;
                }
                entry = NEXT_TX(entry);
                i++;
        }

        dev_kfree_skb(skb);

        return ret;
}
3973
3974 static void tg3_set_txd(struct tg3 *tp, int entry,
3975                         dma_addr_t mapping, int len, u32 flags,
3976                         u32 mss_and_is_end)
3977 {
3978         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3979         int is_end = (mss_and_is_end & 0x1);
3980         u32 mss = (mss_and_is_end >> 1);
3981         u32 vlan_tag = 0;
3982
3983         if (is_end)
3984                 flags |= TXD_FLAG_END;
3985         if (flags & TXD_FLAG_VLAN) {
3986                 vlan_tag = flags >> 16;
3987                 flags &= 0xffff;
3988         }
3989         vlan_tag |= (mss << TXD_MSS_SHIFT);
3990
3991         txd->addr_hi = ((u64) mapping >> 32);
3992         txd->addr_lo = ((u64) mapping & 0xffffffff);
3993         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3994         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3995 }
3996
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 *
 * Queues the skb head plus all page fragments as TX descriptors and
 * kicks the producer mailbox.  Returns NETDEV_TX_OK once queued (or
 * dropped), NETDEV_TX_BUSY only on the "ring full while queue awake"
 * hard error.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        dma_addr_t mapping;
        u32 len, entry, base_flags, mss;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        mss = 0;
        /* TSO path: prime the IP/TCP headers so hardware can replicate
         * them per segment.
         */
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                int tcp_opt_len, ip_tcp_len;

                /* Unclone the header area before we modify it; drop
                 * the packet if that allocation fails.
                 */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                /* Fold the total header length into the upper bits of
                 * the MSS descriptor field (hardware TSO encoding).
                 */
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
                else {
                        struct iphdr *iph = ip_hdr(skb);

                        tcp_opt_len = tcp_optlen(skb);
                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                        /* Checksums and tot_len are recomputed by the
                         * chip for each emitted segment.
                         */
                        iph->check = 0;
                        iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                        mss |= (ip_tcp_len + tcp_opt_len) << 9;
                }

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                tcp_hdr(skb)->check = 0;

        }
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
        /* VLAN tag travels in the upper 16 bits of the flags word;
         * tg3_set_txd() unpacks it into the descriptor.
         */
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        /* Queue skb data, a.k.a. the main skb fragment. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

        /* is_end bit set only when there are no page fragments. */
        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = pci_map_page(tp->pdev,
                                               frag->page,
                                               frag->page_offset,
                                               len, PCI_DMA_TODEVICE);

                        /* Only the head entry owns the skb pointer. */
                        tp->tx_buffers[entry].skb = NULL;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

                        tg3_set_txd(tp, entry, mapping, len,
                                    base_flags, (i == last) | (mss << 1));

                        entry = NEXT_TX(entry);
                }
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        /* Stop the queue when the ring can no longer fit a maximally
         * fragmented skb; re-check and wake in case TX reclaim freed
         * entries between the test and the stop.
         */
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                netif_stop_queue(dev);
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
4115
4116 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4117
4118 /* Use GSO to workaround a rare TSO bug that may be triggered when the
4119  * TSO header is greater than 80 bytes.
4120  */
4121 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4122 {
4123         struct sk_buff *segs, *nskb;
4124
4125         /* Estimate the number of fragments in the worst case */
4126         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4127                 netif_stop_queue(tp->dev);
4128                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4129                         return NETDEV_TX_BUSY;
4130
4131                 netif_wake_queue(tp->dev);
4132         }
4133
4134         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4135         if (unlikely(IS_ERR(segs)))
4136                 goto tg3_tso_bug_end;
4137
4138         do {
4139                 nskb = segs;
4140                 segs = segs->next;
4141                 nskb->next = NULL;
4142                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4143         } while (segs);
4144
4145 tg3_tso_bug_end:
4146         dev_kfree_skb(skb);
4147
4148         return NETDEV_TX_OK;
4149 }
4150
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 *
 * Like tg3_start_xmit() but every mapped buffer is additionally checked
 * against the 4GB-crossing and >40-bit DMA errata; when any buffer
 * trips a check, the whole packet is re-queued through the linearizing
 * workaround (tigon3_dma_hwbug_workaround).
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        dma_addr_t mapping;
        u32 len, entry, base_flags, mss;
        int would_hit_hwbug;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
        mss = 0;
        /* TSO (hardware or firmware) setup. */
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                struct iphdr *iph;
                int tcp_opt_len, ip_tcp_len, hdr_len;

                /* Unclone the header area before modifying it; drop
                 * the packet if that allocation fails.
                 */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                tcp_opt_len = tcp_optlen(skb);
                ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                /* Headers longer than 80 bytes trip the TSO erratum on
                 * affected chips; fall back to software GSO.
                 */
                hdr_len = ip_tcp_len + tcp_opt_len;
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                             (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
                        return (tg3_tso_bug(tp, skb));

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                iph = ip_hdr(skb);
                iph->check = 0;
                iph->tot_len = htons(mss + hdr_len);
                if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
                        /* Hardware TSO computes the checksums itself. */
                        tcp_hdr(skb)->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                } else
                        /* Firmware TSO wants the pseudo-header checksum
                         * pre-seeded in the TCP header.
                         */
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);

                /* Encode IP/TCP option lengths; the field location
                 * differs between HW-TSO/5705 and the other chips.
                 */
                if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                mss |= (tsflags << 11);
                        }
                } else {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                base_flags |= tsflags << 12;
                        }
                }
        }
#if TG3_VLAN_TAG_USED
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        /* Queue skb data, a.k.a. the main skb fragment. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

        /* Track whether any buffer lands in an erratum region; we fix
         * the whole packet up at the end rather than bailing early.
         */
        would_hit_hwbug = 0;

        if (tg3_4g_overflow_test(mapping, len))
                would_hit_hwbug = 1;

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = pci_map_page(tp->pdev,
                                               frag->page,
                                               frag->page_offset,
                                               len, PCI_DMA_TODEVICE);

                        tp->tx_buffers[entry].skb = NULL;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

                        if (tg3_4g_overflow_test(mapping, len))
                                would_hit_hwbug = 1;

                        if (tg3_40bit_overflow_test(tp, mapping, len))
                                would_hit_hwbug = 1;

                        /* Non-HW-TSO chips must not see the MSS bits on
                         * fragment descriptors.
                         */
                        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last)|(mss << 1));
                        else
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last));

                        entry = NEXT_TX(entry);
                }
        }

        if (would_hit_hwbug) {
                u32 last_plus_one = entry;
                u32 start;

                /* Rewind to the first descriptor we filled (head +
                 * nr_frags entries), modulo the ring size.
                 */
                start = entry - 1 - skb_shinfo(skb)->nr_frags;
                start &= (TG3_TX_RING_SIZE - 1);

                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
                if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
                                                &start, base_flags, mss))
                        goto out_unlock;

                entry = start;
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        /* Stop the queue when nearly full; re-check and wake in case
         * TX reclaim ran in between.
         */
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                netif_stop_queue(dev);
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
4324
4325 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4326                                int new_mtu)
4327 {
4328         dev->mtu = new_mtu;
4329
4330         if (new_mtu > ETH_DATA_LEN) {
4331                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4332                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4333                         ethtool_op_set_tso(dev, 0);
4334                 }
4335                 else
4336                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4337         } else {
4338                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4339                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4340                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4341         }
4342 }
4343
/* net_device change_mtu hook.  Validates the requested MTU, and when
 * the interface is up performs a full halt/reconfigure/restart cycle
 * since the ring layout depends on the MTU.  Returns 0 on success,
 * -EINVAL for an out-of-range MTU, or the tg3_restart_hw() error.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                tg3_set_mtu(dev, tp, new_mtu);
                return 0;
        }

        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

        tg3_set_mtu(dev, tp, new_mtu);

        err = tg3_restart_hw(tp, 0);

        /* On failure tg3_restart_hw() has already closed the device;
         * only restart the net interface when the restart succeeded.
         */
        if (!err)
                tg3_netif_start(tp);

        tg3_full_unlock(tp);

        return err;
}
4377
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
        struct ring_info *rxp;
        int i;

        /* Unmap and free every posted standard-ring RX buffer. */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                rxp = &tp->rx_std_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 tp->rx_pkt_buf_sz - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* Same for the jumbo ring. */
        for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                rxp = &tp->rx_jumbo_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* TX ring: the entry holding the skb is followed by one entry
         * per page fragment, so walk in skb-sized strides.
         */
        for (i = 0; i < TG3_TX_RING_SIZE; ) {
                struct tx_ring_info *txp;
                struct sk_buff *skb;
                int j;

                txp = &tp->tx_buffers[i];
                skb = txp->skb;

                if (skb == NULL) {
                        i++;
                        continue;
                }

                /* Head buffer was mapped with pci_map_single()... */
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(txp, mapping),
                                 skb_headlen(skb),
                                 PCI_DMA_TODEVICE);
                txp->skb = NULL;

                i++;

                /* ...fragments with pci_map_page(); the mask handles
                 * a packet that wraps around the end of the ring.
                 */
                for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
                        txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
                        pci_unmap_page(tp->pdev,
                                       pci_unmap_addr(txp, mapping),
                                       skb_shinfo(skb)->frags[j].size,
                                       PCI_DMA_TODEVICE);
                        i++;
                }

                dev_kfree_skb_any(skb);
        }
}
4449
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, -ENOMEM when no RX buffer at all could be
 * allocated.  A partially populated ring is tolerated: rx_pending /
 * rx_jumbo_pending are trimmed to what was actually allocated.
 */
static int tg3_init_rings(struct tg3 *tp)
{
        u32 i;

        /* Free up all the SKBs. */
        tg3_free_rings(tp);

        /* Zero out all descriptors. */
        memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
        memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
        memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
        memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

        /* 5780-class chips have no jumbo ring; they use jumbo-sized
         * buffers on the standard ring instead (see tg3_set_mtu).
         */
        tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
        if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
            (tp->dev->mtu > ETH_DATA_LEN))
                tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

        /* Initialize invariants of the rings, we only set this
         * stuff once.  This works because the card does not
         * write into the rx buffer posting rings.
         */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tp->rx_std[i];
                rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
                        << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
                rxd->opaque = (RXD_OPAQUE_RING_STD |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                        struct tg3_rx_buffer_desc *rxd;

                        rxd = &tp->rx_jumbo[i];
                        rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
                                << RXD_LEN_SHIFT;
                        rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
                                RXD_FLAG_JUMBO;
                        rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
                }
        }

        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
                if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
                        printk(KERN_WARNING PFX
                               "%s: Using a smaller RX standard ring, "
                               "only %d out of %d buffers were allocated "
                               "successfully.\n",
                               tp->dev->name, i, tp->rx_pending);
                        if (i == 0)
                                return -ENOMEM;
                        tp->rx_pending = i;
                        break;
                }
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < tp->rx_jumbo_pending; i++) {
                        if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
                                             -1, i) < 0) {
                                printk(KERN_WARNING PFX
                                       "%s: Using a smaller RX jumbo ring, "
                                       "only %d out of %d buffers were "
                                       "allocated successfully.\n",
                                       tp->dev->name, i, tp->rx_jumbo_pending);
                                if (i == 0) {
                                        /* Release the standard-ring skbs
                                         * allocated above.
                                         */
                                        tg3_free_rings(tp);
                                        return -ENOMEM;
                                }
                                tp->rx_jumbo_pending = i;
                                break;
                        }
                }
        }
        return 0;
}
4539
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
4544 static void tg3_free_consistent(struct tg3 *tp)
4545 {
4546         kfree(tp->rx_std_buffers);
4547         tp->rx_std_buffers = NULL;
4548         if (tp->rx_std) {
4549                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4550                                     tp->rx_std, tp->rx_std_mapping);
4551                 tp->rx_std = NULL;
4552         }
4553         if (tp->rx_jumbo) {
4554                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4555                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4556                 tp->rx_jumbo = NULL;
4557         }
4558         if (tp->rx_rcb) {
4559                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4560                                     tp->rx_rcb, tp->rx_rcb_mapping);
4561                 tp->rx_rcb = NULL;
4562         }
4563         if (tp->tx_ring) {
4564                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4565                         tp->tx_ring, tp->tx_desc_mapping);
4566                 tp->tx_ring = NULL;
4567         }
4568         if (tp->hw_status) {
4569                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4570                                     tp->hw_status, tp->status_mapping);
4571                 tp->hw_status = NULL;
4572         }
4573         if (tp->hw_stats) {
4574                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4575                                     tp->hw_stats, tp->stats_mapping);
4576                 tp->hw_stats = NULL;
4577         }
4578 }
4579
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
/* Allocate all host memory the driver needs: one kzalloc'd array
 * holding the std/jumbo RX and TX bookkeeping entries, plus the
 * DMA-consistent descriptor rings, status block and statistics block.
 * Returns 0 on success or -ENOMEM; on any failure everything already
 * allocated is released via tg3_free_consistent().
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
        /* Single allocation carved into three bookkeeping regions. */
        tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
                                      (TG3_RX_RING_SIZE +
                                       TG3_RX_JUMBO_RING_SIZE)) +
                                     (sizeof(struct tx_ring_info) *
                                      TG3_TX_RING_SIZE),
                                     GFP_KERNEL);
        if (!tp->rx_std_buffers)
                return -ENOMEM;

        /* Jumbo entries follow the std entries; TX entries follow the
         * jumbo entries.
         */
        tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
        tp->tx_buffers = (struct tx_ring_info *)
                &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

        tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
                                          &tp->rx_std_mapping);
        if (!tp->rx_std)
                goto err_out;

        tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
                                            &tp->rx_jumbo_mapping);

        if (!tp->rx_jumbo)
                goto err_out;

        tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
                                          &tp->rx_rcb_mapping);
        if (!tp->rx_rcb)
                goto err_out;

        tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
                                           &tp->tx_desc_mapping);
        if (!tp->tx_ring)
                goto err_out;

        tp->hw_status = pci_alloc_consistent(tp->pdev,
                                             TG3_HW_STATUS_SIZE,
                                             &tp->status_mapping);
        if (!tp->hw_status)
                goto err_out;

        tp->hw_stats = pci_alloc_consistent(tp->pdev,
                                            sizeof(struct tg3_hw_stats),
                                            &tp->stats_mapping);
        if (!tp->hw_stats)
                goto err_out;

        /* Status and stats blocks are read by the chip; start clean. */
        memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        return 0;

err_out:
        tg3_free_consistent(tp);
        return -ENOMEM;
}
4641
4642 #define MAX_WAIT_CNT 1000
4643
4644 /* To stop a block, clear the enable bit and poll till it
4645  * clears.  tp->lock is held.
4646  */
4647 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4648 {
4649         unsigned int i;
4650         u32 val;
4651
4652         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4653                 switch (ofs) {
4654                 case RCVLSC_MODE:
4655                 case DMAC_MODE:
4656                 case MBFREE_MODE:
4657                 case BUFMGR_MODE:
4658                 case MEMARB_MODE:
4659                         /* We can't enable/disable these bits of the
4660                          * 5705/5750, just say success.
4661                          */
4662                         return 0;
4663
4664                 default:
4665                         break;
4666                 };
4667         }
4668
4669         val = tr32(ofs);
4670         val &= ~enable_bit;
4671         tw32_f(ofs, val);
4672
4673         for (i = 0; i < MAX_WAIT_CNT; i++) {
4674                 udelay(100);
4675                 val = tr32(ofs);
4676                 if ((val & enable_bit) == 0)
4677                         break;
4678         }
4679
4680         if (i == MAX_WAIT_CNT && !silent) {
4681                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4682                        "ofs=%lx enable_bit=%x\n",
4683                        ofs, enable_bit);
4684                 return -ENODEV;
4685         }
4686
4687         return 0;
4688 }
4689
/* tp->lock is held.
 *
 * Quiesce the chip ahead of a reset: mask interrupts, stop the RX
 * MAC, disable each RX/TX/DMA processing block in pipeline order,
 * drain the TX MAC, then stop host coalescing, write DMA and the
 * memory blocks and reset the FTQs.  Per-block timeouts are
 * reported (unless @silent) and OR-ed into the return value;
 * 0 means every block stopped cleanly.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting frames before tearing down the RX pipeline. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* RX-side blocks, roughly producer to consumer. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* TX-side and DMA blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* The TX MAC has no tg3_stop_block() helper; poll it by hand. */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse-reset all FTQs (flow-through queues). */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear shared status/statistics so stale state is not read
	 * back after the reset.
	 */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4752
/* tp->lock is held.
 *
 * Acquire the NVRAM software arbitration semaphore.  The lock is
 * recursive: nvram_lock_cnt counts nested holders and the hardware
 * request is only issued for the outermost acquisition.  Polls up
 * to 8000 * 20us (160ms) for the SWARB grant.  Returns 0 on
 * success (or on parts without NVRAM, where this is a no-op),
 * -ENODEV if arbitration is never granted.
 */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				/* Withdraw the request before failing. */
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
4775
4776 /* tp->lock is held. */
4777 static void tg3_nvram_unlock(struct tg3 *tp)
4778 {
4779         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4780                 if (tp->nvram_lock_cnt > 0)
4781                         tp->nvram_lock_cnt--;
4782                 if (tp->nvram_lock_cnt == 0)
4783                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4784         }
4785 }
4786
4787 /* tp->lock is held. */
4788 static void tg3_enable_nvram_access(struct tg3 *tp)
4789 {
4790         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4791             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4792                 u32 nvaccess = tr32(NVRAM_ACCESS);
4793
4794                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4795         }
4796 }
4797
4798 /* tp->lock is held. */
4799 static void tg3_disable_nvram_access(struct tg3 *tp)
4800 {
4801         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4802             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4803                 u32 nvaccess = tr32(NVRAM_ACCESS);
4804
4805                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4806         }
4807 }
4808
/* Post an event to the APE management firmware.
 *
 * Bails out silently if the APE shared segment signature or
 * firmware-status handshake is not in place.  Then, holding the
 * APE memory lock, waits up to ~1ms (10 * 100us) for any previous
 * event to be consumed before writing the new one, and finally
 * rings the APE doorbell.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (apedata != APE_FW_STATUS_READY)
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Slot free: publish the event with PENDING set. */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the doorbell only if we actually queued the event. */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
4844
4845 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
4846 {
4847         u32 event;
4848         u32 apedata;
4849
4850         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
4851                 return;
4852
4853         switch (kind) {
4854                 case RESET_KIND_INIT:
4855                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
4856                                         APE_HOST_SEG_SIG_MAGIC);
4857                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
4858                                         APE_HOST_SEG_LEN_MAGIC);
4859                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
4860                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
4861                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
4862                                         APE_HOST_DRIVER_ID_MAGIC);
4863                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
4864                                         APE_HOST_BEHAV_NO_PHYLOCK);
4865
4866                         event = APE_EVENT_STATUS_STATE_START;
4867                         break;
4868                 case RESET_KIND_SHUTDOWN:
4869                         event = APE_EVENT_STATUS_STATE_UNLOAD;
4870                         break;
4871                 case RESET_KIND_SUSPEND:
4872                         event = APE_EVENT_STATUS_STATE_SUSPEND;
4873                         break;
4874                 default:
4875                         return;
4876         }
4877
4878         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
4879
4880         tg3_ape_send_event(tp, event);
4881 }
4882
4883 /* tp->lock is held. */
4884 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4885 {
4886         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4887                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4888
4889         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4890                 switch (kind) {
4891                 case RESET_KIND_INIT:
4892                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4893                                       DRV_STATE_START);
4894                         break;
4895
4896                 case RESET_KIND_SHUTDOWN:
4897                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4898                                       DRV_STATE_UNLOAD);
4899                         break;
4900
4901                 case RESET_KIND_SUSPEND:
4902                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4903                                       DRV_STATE_SUSPEND);
4904                         break;
4905
4906                 default:
4907                         break;
4908                 };
4909         }
4910
4911         if (kind == RESET_KIND_INIT ||
4912             kind == RESET_KIND_SUSPEND)
4913                 tg3_ape_driver_state_change(tp, kind);
4914 }
4915
4916 /* tp->lock is held. */
4917 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4918 {
4919         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4920                 switch (kind) {
4921                 case RESET_KIND_INIT:
4922                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4923                                       DRV_STATE_START_DONE);
4924                         break;
4925
4926                 case RESET_KIND_SHUTDOWN:
4927                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4928                                       DRV_STATE_UNLOAD_DONE);
4929                         break;
4930
4931                 default:
4932                         break;
4933                 };
4934         }
4935
4936         if (kind == RESET_KIND_SHUTDOWN)
4937                 tg3_ape_driver_state_change(tp, kind);
4938 }
4939
4940 /* tp->lock is held. */
4941 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4942 {
4943         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4944                 switch (kind) {
4945                 case RESET_KIND_INIT:
4946                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4947                                       DRV_STATE_START);
4948                         break;
4949
4950                 case RESET_KIND_SHUTDOWN:
4951                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4952                                       DRV_STATE_UNLOAD);
4953                         break;
4954
4955                 case RESET_KIND_SUSPEND:
4956                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4957                                       DRV_STATE_SUSPEND);
4958                         break;
4959
4960                 default:
4961                         break;
4962                 };
4963         }
4964 }
4965
/* Wait for on-chip firmware to finish initializing after a reset.
 *
 * 5906 parts expose an init-done bit in VCPU_STATUS and time out
 * hard (-ENODEV) after 20ms.  All other parts are polled for the
 * inverted mailbox magic for up to 1s; timing out there is NOT an
 * error (Sun onboard parts ship without firmware) and is only
 * logged once.  Returns 0 in that case.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		/* Firmware acknowledges by writing back the bitwise
		 * complement of the magic the driver wrote pre-reset.
		 */
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	return 0;
}
5004
5005 /* Save PCI command register before chip reset */
5006 static void tg3_save_pci_state(struct tg3 *tp)
5007 {
5008         u32 val;
5009
5010         pci_read_config_dword(tp->pdev, TG3PCI_COMMAND, &val);
5011         tp->pci_cmd = val;
5012 }
5013
/* Restore PCI state after chip reset.
 *
 * Re-arms indirect register access, rebuilds TG3PCI_PCISTATE with
 * the retry/APE bits this chip needs, restores the saved PCI
 * command register, clears PCI-X relaxed ordering, and re-enables
 * MSI on 5780-class parts (chip reset clears their MSI enable
 * bit).
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	/* 5704 A0 in PCI-X mode needs same-DMA retries — hw erratum
	 * workaround, presumably; see chip errata.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Put back the command register saved in tg3_save_pci_state(). */
	pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd);

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->pcix_cap) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
5066
5067 static void tg3_stop_fw(struct tg3 *);
5068
/* tp->lock is held.
 *
 * Perform a GRC core-clock chip reset and bring the chip back to
 * a state where registers and firmware are usable again.  The
 * sequence is extremely order-sensitive and full of per-revision
 * workarounds; do not reorder.  Returns 0 on success or the error
 * from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
	if (tp->hw_status) {
		tp->hw_status->status = 0;
		tp->hw_status->status_tag = 0;
	}
	tp->last_tag = 0;
	smp_mb();
	synchronize_irq(tp->pdev->irq);

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* Undocumented PCIe tuning register — values taken
		 * from vendor code; meaning unknown (TODO confirm).
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: flag a driver reset to the VCPU and unhalt it. */
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	tg3_restore_pci_state(tp);

	/* Registers are accessible again; let the irq handler in. */
	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

	/* Re-enable the memory arbiter (preserving mode bits on
	 * 5780-class parts).
	 */
	val = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		/* Undocumented 5705 A0 workaround register. */
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Reprogram the MAC port mode according to the PHY type. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		/* Undocumented PCIe register — vendor-supplied value. */
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
5257
/* tp->lock is held.
 *
 * Ask the ASF firmware on the RX CPU to pause: write the PAUSE
 * command to the firmware command mailbox, raise the RX CPU event
 * bit, then poll up to 100us for the firmware to acknowledge by
 * clearing it.  No-op unless ASF is enabled without the APE.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	   !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		u32 val;
		int i;

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
		/* Bit 14 is the driver-to-RX-CPU event doorbell. */
		val = tr32(GRC_RX_CPU_EVENT);
		val |= (1 << 14);
		tw32(GRC_RX_CPU_EVENT, val);

		/* Wait for RX cpu to ACK the event.  */
		for (i = 0; i < 100; i++) {
			if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
				break;
			udelay(1);
		}
	}
}
5279
/* tp->lock is held.
 *
 * Fully halt the chip: pause management firmware, signal the
 * upcoming reset to ASF/APE firmware, quiesce all MAC/DMA blocks
 * and perform a core-clock chip reset, then signal completion.
 *
 * @kind: one of RESET_KIND_INIT/SHUTDOWN/SUSPEND, forwarded to
 *        the firmware signalling helpers.
 * @silent: suppress tg3_abort_hw() timeout messages.
 *
 * Returns 0 on success or the error from tg3_chip_reset().
 * tg3_abort_hw() failures are deliberately not propagated; the
 * subsequent chip reset recovers the hardware regardless.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
5300
5301 #define TG3_FW_RELEASE_MAJOR    0x0
5302 #define TG3_FW_RELASE_MINOR     0x0
5303 #define TG3_FW_RELEASE_FIX      0x0
5304 #define TG3_FW_START_ADDR       0x08000000
5305 #define TG3_FW_TEXT_ADDR        0x08000000
5306 #define TG3_FW_TEXT_LEN         0x9c0
5307 #define TG3_FW_RODATA_ADDR      0x080009c0
5308 #define TG3_FW_RODATA_LEN       0x60
5309 #define TG3_FW_DATA_ADDR        0x08000a40
5310 #define TG3_FW_DATA_LEN         0x20
5311 #define TG3_FW_SBSS_ADDR        0x08000a60
5312 #define TG3_FW_SBSS_LEN         0xc
5313 #define TG3_FW_BSS_ADDR         0x08000a70
5314 #define TG3_FW_BSS_LEN          0x10
5315
5316 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5317         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5318         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5319         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5320         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5321         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5322         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5323         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5324         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5325         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5326         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5327         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5328         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5329         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5330         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5331         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5332         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5333         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5334         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5335         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5336         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5337         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5338         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5339         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5340         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5341         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5342         0, 0, 0, 0, 0, 0,
5343         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5344         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5345         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5346         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5347         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5348         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5349         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5350         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5351         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5352         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5353         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5354         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5355         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5356         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5357         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5358         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5359         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5360         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5361         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5362         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5363         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5364         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5365         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5366         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5367         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5368         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5369         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5370         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5371         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5372         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5373         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5374         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5375         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5376         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5377         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5378         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5379         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5380         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5381         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5382         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5383         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5384         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5385         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5386         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5387         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5388         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5389         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5390         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5391         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5392         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5393         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5394         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5395         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5396         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5397         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5398         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5399         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5400         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5401         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5402         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5403         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5404         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5405         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5406         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5407         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5408 };
5409
/* Read-only data section of the 5701 A0 workaround firmware.  The
 * words appear to be ASCII string fragments used as firmware message
 * text (e.g. 0x35373031 decodes to "5701") — do not edit by hand.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5417
#if 0 /* All zeros, don't eat up space with it. */
/* Data section of the 5701 A0 firmware.  Every word is zero, so the
 * array is compiled out; the loader is handed a NULL data pointer and
 * zero-fills the section instead (see tg3_load_firmware_cpu()).
 */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
5424
/* Base addresses and sizes (16 KB each) of the on-chip scratch memory
 * areas that the RX and TX CPU firmware images are downloaded into.
 */
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
5429
/* tp->lock is held. */
/* Halt the on-chip CPU selected by @offset (RX_CPU_BASE or
 * TX_CPU_BASE).  Returns 0 on success, -ENODEV if the CPU never
 * reports the halted state.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	/* 5705+ chips have no TX CPU, so halting it is a driver bug. */
	BUG_ON(offset == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906 has a single virtual CPU halted through the GRC
		 * block rather than the per-CPU MODE registers.
		 * NOTE(review): this path returns without clearing the
		 * firmware's NVRAM arbitration below — confirm the 5906
		 * bootcode never leaves SWARB held.
		 */
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		/* Keep requesting the halt until the CPU acknowledges
		 * it in CPU_MODE, up to 10000 attempts.
		 */
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* RX CPU only: issue one final flushed halt request and
		 * give the CPU a moment to settle.
		 */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	/* Loop above exhausted all retries without seeing the halt bit. */
	if (i >= 10000) {
		printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
		       "and %s CPU\n",
		       tp->dev->name,
		       (offset == RX_CPU_BASE ? "RX" : "TX"));
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
5477
/* Describes one firmware image to be downloaded into an on-chip CPU:
 * the text, rodata and data sections with their load addresses,
 * lengths in bytes, and payload words.  A NULL payload pointer means
 * the section is all zeros (see tg3_load_firmware_cpu()).
 */
struct fw_info {
	unsigned int text_base;		/* load address of .text */
	unsigned int text_len;		/* length of .text in bytes */
	const u32 *text_data;		/* .text payload words */
	unsigned int rodata_base;	/* load address of .rodata */
	unsigned int rodata_len;	/* length of .rodata in bytes */
	const u32 *rodata_data;		/* .rodata payload words */
	unsigned int data_base;		/* load address of .data */
	unsigned int data_len;		/* length of .data in bytes */
	const u32 *data_data;		/* .data payload (NULL => zeros) */
};
5489
5490 /* tp->lock is held. */
5491 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5492                                  int cpu_scratch_size, struct fw_info *info)
5493 {
5494         int err, lock_err, i;
5495         void (*write_op)(struct tg3 *, u32, u32);
5496
5497         if (cpu_base == TX_CPU_BASE &&
5498             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5499                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5500                        "TX cpu firmware on %s which is 5705.\n",
5501                        tp->dev->name);
5502                 return -EINVAL;
5503         }
5504
5505         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5506                 write_op = tg3_write_mem;
5507         else
5508                 write_op = tg3_write_indirect_reg32;
5509
5510         /* It is possible that bootcode is still loading at this point.
5511          * Get the nvram lock first before halting the cpu.
5512          */
5513         lock_err = tg3_nvram_lock(tp);
5514         err = tg3_halt_cpu(tp, cpu_base);
5515         if (!lock_err)
5516                 tg3_nvram_unlock(tp);
5517         if (err)
5518                 goto out;
5519
5520         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5521                 write_op(tp, cpu_scratch_base + i, 0);
5522         tw32(cpu_base + CPU_STATE, 0xffffffff);
5523         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5524         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5525                 write_op(tp, (cpu_scratch_base +
5526                               (info->text_base & 0xffff) +
5527                               (i * sizeof(u32))),
5528                          (info->text_data ?
5529                           info->text_data[i] : 0));
5530         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5531                 write_op(tp, (cpu_scratch_base +
5532                               (info->rodata_base & 0xffff) +
5533                               (i * sizeof(u32))),
5534                          (info->rodata_data ?
5535                           info->rodata_data[i] : 0));
5536         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5537                 write_op(tp, (cpu_scratch_base +
5538                               (info->data_base & 0xffff) +
5539                               (i * sizeof(u32))),
5540                          (info->data_data ?
5541                           info->data_data[i] : 0));
5542
5543         err = 0;
5544
5545 out:
5546         return err;
5547 }
5548
5549 /* tp->lock is held. */
5550 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5551 {
5552         struct fw_info info;
5553         int err, i;
5554
5555         info.text_base = TG3_FW_TEXT_ADDR;
5556         info.text_len = TG3_FW_TEXT_LEN;
5557         info.text_data = &tg3FwText[0];
5558         info.rodata_base = TG3_FW_RODATA_ADDR;
5559         info.rodata_len = TG3_FW_RODATA_LEN;
5560         info.rodata_data = &tg3FwRodata[0];
5561         info.data_base = TG3_FW_DATA_ADDR;
5562         info.data_len = TG3_FW_DATA_LEN;
5563         info.data_data = NULL;
5564
5565         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5566                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5567                                     &info);
5568         if (err)
5569                 return err;
5570
5571         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5572                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5573                                     &info);
5574         if (err)
5575                 return err;
5576
5577         /* Now startup only the RX cpu. */
5578         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5579         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5580
5581         for (i = 0; i < 5; i++) {
5582                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5583                         break;
5584                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5585                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5586                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5587                 udelay(1000);
5588         }
5589         if (i >= 5) {
5590                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5591                        "to set RX CPU PC, is %08x should be %08x\n",
5592                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5593                        TG3_FW_TEXT_ADDR);
5594                 return -ENODEV;
5595         }
5596         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5597         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5598
5599         return 0;
5600 }
5601
5602
/* Layout of the TSO firmware image (version 1.6.0): load addresses
 * and byte lengths of its text/rodata/data/sbss/bss sections.
 * NOTE(review): "RELASE" in TG3_TSO_FW_RELASE_MINOR is a historical
 * typo; renaming it would require auditing every user, so it is only
 * flagged here.
 */
#define TG3_TSO_FW_RELEASE_MAJOR	0x1
#define TG3_TSO_FW_RELASE_MINOR		0x6
#define TG3_TSO_FW_RELEASE_FIX		0x0
#define TG3_TSO_FW_START_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_LEN		0x1aa0
#define TG3_TSO_FW_RODATA_ADDR		0x08001aa0
#define TG3_TSO_FW_RODATA_LEN		0x60
#define TG3_TSO_FW_DATA_ADDR		0x08001b20
#define TG3_TSO_FW_DATA_LEN		0x30
#define TG3_TSO_FW_SBSS_ADDR		0x08001b50
#define TG3_TSO_FW_SBSS_LEN		0x2c
#define TG3_TSO_FW_BSS_ADDR		0x08001b80
#define TG3_TSO_FW_BSS_LEN		0x894
5617
5618 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5619         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5620         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5621         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5622         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5623         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5624         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5625         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5626         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5627         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5628         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5629         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5630         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5631         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5632         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5633         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5634         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5635         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5636         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5637         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5638         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5639         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5640         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5641         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5642         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5643         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5644         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5645         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5646         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5647         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5648         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5649         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5650         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5651         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5652         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5653         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5654         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5655         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5656         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5657         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5658         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5659         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5660         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5661         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5662         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5663         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5664         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5665         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5666         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5667         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5668         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5669         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5670         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5671         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5672         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5673         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5674         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5675         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5676         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5677         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5678         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5679         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5680         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5681         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5682         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5683         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5684         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5685         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5686         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5687         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5688         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5689         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5690         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5691         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5692         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5693         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5694         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5695         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5696         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5697         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5698         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5699         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5700         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5701         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5702         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5703         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5704         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5705         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5706         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5707         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5708         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5709         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5710         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5711         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5712         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5713         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5714         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5715         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5716         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5717         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5718         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5719         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5720         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5721         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5722         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5723         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5724         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5725         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5726         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5727         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5728         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5729         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5730         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5731         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5732         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5733         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5734         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5735         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5736         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5737         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5738         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5739         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5740         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5741         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5742         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5743         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5744         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5745         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5746         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5747         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5748         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5749         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5750         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5751         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5752         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5753         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5754         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5755         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5756         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5757         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5758         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5759         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5760         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5761         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5762         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5763         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5764         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5765         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5766         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5767         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5768         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5769         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5770         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5771         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5772         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5773         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5774         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5775         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5776         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5777         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5778         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5779         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5780         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5781         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5782         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5783         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5784         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5785         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5786         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5787         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5788         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5789         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5790         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5791         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5792         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5793         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5794         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5795         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5796         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5797         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5798         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5799         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5800         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5801         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5802         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5803         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5804         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5805         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5806         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5807         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5808         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5809         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5810         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5811         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5812         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5813         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5814         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5815         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5816         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5817         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5818         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5819         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5820         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5821         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5822         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5823         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5824         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5825         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5826         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5827         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5828         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5829         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5830         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5831         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5832         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5833         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5834         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5835         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5836         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5837         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5838         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5839         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5840         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5841         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5842         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5843         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5844         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5845         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5846         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5847         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5848         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5849         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5850         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5851         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5852         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5853         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5854         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5855         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5856         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5857         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5858         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5859         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5860         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5861         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5862         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5863         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5864         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5865         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5866         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5867         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5868         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5869         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5870         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5871         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5872         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5873         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5874         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5875         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5876         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5877         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5878         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5879         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5880         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5881         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5882         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5883         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5884         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5885         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5886         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5887         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5888         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5889         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5890         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5891         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5892         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5893         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5894         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5895         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5896         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5897         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5898         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5899         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5900         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5901         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5902         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5903 };
5904
/* Read-only data segment of the standard (non-5705) TSO firmware.
 * Opaque generated image words, loaded into NIC SRAM at
 * TG3_TSO_FW_RODATA_ADDR by tg3_load_tso_firmware().  Most entries are
 * ASCII tags embedded by the firmware build ("Main", "CpuB", "stkoffld",
 * "fatalErr", ...) -- do not edit by hand.
 */
static const u32 tg3TsoFwRodata[] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
        0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
        0x00000000,
};
5912
/* Initialized data segment of the standard TSO firmware, loaded at
 * TG3_TSO_FW_DATA_ADDR by tg3_load_tso_firmware().  The ASCII words
 * spell the image's version tag ("stkoffld_v1.6.0") -- opaque image
 * data, do not edit by hand.
 */
static const u32 tg3TsoFwData[] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000,
};
5918
/* 5705 needs a special version of the TSO firmware.  */
#define TG3_TSO5_FW_RELEASE_MAJOR       0x1
#define TG3_TSO5_FW_RELASE_MINOR        0x2
/* Correctly spelled alias for the historically misspelled macro above;
 * the old name is kept so any existing references continue to compile.
 */
#define TG3_TSO5_FW_RELEASE_MINOR       TG3_TSO5_FW_RELASE_MINOR
#define TG3_TSO5_FW_RELEASE_FIX         0x0
/* NIC SRAM layout of the 5705 TSO image.  The .text, .rodata and .data
 * segments are downloaded by tg3_load_tso_firmware(); the SBSS/BSS
 * lengths are additionally counted when sizing the CPU scratch area.
 */
#define TG3_TSO5_FW_START_ADDR          0x00010000
#define TG3_TSO5_FW_TEXT_ADDR           0x00010000
#define TG3_TSO5_FW_TEXT_LEN            0xe90
#define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
#define TG3_TSO5_FW_RODATA_LEN          0x50
#define TG3_TSO5_FW_DATA_ADDR           0x00010f00
#define TG3_TSO5_FW_DATA_LEN            0x20
#define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
#define TG3_TSO5_FW_SBSS_LEN            0x28
#define TG3_TSO5_FW_BSS_ADDR            0x00010f50
#define TG3_TSO5_FW_BSS_LEN             0x88
5934
/* Instruction (.text) segment of the 5705-specific TSO firmware,
 * loaded onto the RX CPU at TG3_TSO5_FW_TEXT_ADDR by
 * tg3_load_tso_firmware().  Opaque generated image words (the stack
 * frame idioms suggest MIPS code, but treat the contents as opaque) --
 * do not edit by hand.  The array is sized from TG3_TSO5_FW_TEXT_LEN
 * with one spare element.
 */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
        0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
        0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
        0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
        0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
        0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
        0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
        0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
        0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
        0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
        0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
        0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
        0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
        0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
        0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
        0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
        0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
        0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
        0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
        0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
        0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
        0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
        0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
        0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
        0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
        0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
        0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
        0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
        0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
        0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
        0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
        0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
        0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
        0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
        0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
        0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
        0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
        0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
        0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
        0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
        0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
        0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
        0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
        0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
        0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
        0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
        0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
        0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
        0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
        0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
        0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
        0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
        0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
        0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
        0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
        0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
        0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
        0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
        0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
        0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
        0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
        0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
        0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
        0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
        0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
        0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
        0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
        0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
        0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
        0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
        0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
        0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
        0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
        0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
        0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
        0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
        0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
        0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
        0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
        0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
        0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
        0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
        0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
        0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
        0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
        0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
        0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
        0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
        0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
        0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
        0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
        0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
        0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
        0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
        0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
        0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
        0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
        0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
        0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
        0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
        0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
        0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
        0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
        0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
        0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
        0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
        0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
        0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
        0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
        0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
        0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
        0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
        0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
        0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
        0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
        0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
        0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
        0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
        0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
        0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
        0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
        0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
        0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
        0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
        0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
        0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
        0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
        0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
        0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
        0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
        0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
        0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
        0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
        0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
        0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
        0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
        0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
        0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
        0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
        0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
        0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
        0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
        0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
        0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
        0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
        0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
        0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
        0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
        0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
        0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
        0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
        0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
        0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
        0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
        0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
        0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
        0x00000000, 0x00000000, 0x00000000,
};
6093
/* Read-only data segment of the 5705 TSO firmware, loaded at
 * TG3_TSO5_FW_RODATA_ADDR by tg3_load_tso_firmware().  The entries are
 * ASCII tags embedded by the firmware build ("Main", "CpuB", "stkoffld",
 * "fatalErr", ...) -- opaque image data, do not edit by hand.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
        0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
        0x00000000, 0x00000000, 0x00000000,
};
6100
/* Initialized data segment of the 5705 TSO firmware, loaded at
 * TG3_TSO5_FW_DATA_ADDR by tg3_load_tso_firmware().  The ASCII words
 * spell the image's version tag ("stkoffld_v1.2.0") -- opaque image
 * data, do not edit by hand.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000,
};
6105
6106 /* tp->lock is held. */
6107 static int tg3_load_tso_firmware(struct tg3 *tp)
6108 {
6109         struct fw_info info;
6110         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6111         int err, i;
6112
6113         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6114                 return 0;
6115
6116         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6117                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6118                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6119                 info.text_data = &tg3Tso5FwText[0];
6120                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6121                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6122                 info.rodata_data = &tg3Tso5FwRodata[0];
6123                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6124                 info.data_len = TG3_TSO5_FW_DATA_LEN;
6125                 info.data_data = &tg3Tso5FwData[0];
6126                 cpu_base = RX_CPU_BASE;
6127                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6128                 cpu_scratch_size = (info.text_len +
6129                                     info.rodata_len +
6130                                     info.data_len +
6131                                     TG3_TSO5_FW_SBSS_LEN +
6132                                     TG3_TSO5_FW_BSS_LEN);
6133         } else {
6134                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6135                 info.text_len = TG3_TSO_FW_TEXT_LEN;
6136                 info.text_data = &tg3TsoFwText[0];
6137                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6138                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6139                 info.rodata_data = &tg3TsoFwRodata[0];
6140                 info.data_base = TG3_TSO_FW_DATA_ADDR;
6141                 info.data_len = TG3_TSO_FW_DATA_LEN;
6142                 info.data_data = &tg3TsoFwData[0];
6143                 cpu_base = TX_CPU_BASE;
6144                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6145                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6146         }
6147
6148         err = tg3_load_firmware_cpu(tp, cpu_base,
6149                                     cpu_scratch_base, cpu_scratch_size,
6150                                     &info);
6151         if (err)
6152                 return err;
6153
6154         /* Now startup the cpu. */
6155         tw32(cpu_base + CPU_STATE, 0xffffffff);
6156         tw32_f(cpu_base + CPU_PC,    info.text_base);
6157
6158         for (i = 0; i < 5; i++) {
6159                 if (tr32(cpu_base + CPU_PC) == info.text_base)
6160                         break;
6161                 tw32(cpu_base + CPU_STATE, 0xffffffff);
6162                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
6163                 tw32_f(cpu_base + CPU_PC,    info.text_base);
6164                 udelay(1000);
6165         }
6166         if (i >= 5) {
6167                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6168                        "to set CPU PC, is %08x should be %08x\n",
6169                        tp->dev->name, tr32(cpu_base + CPU_PC),
6170                        info.text_base);
6171                 return -ENODEV;
6172         }
6173         tw32(cpu_base + CPU_STATE, 0xffffffff);
6174         tw32_f(cpu_base + CPU_MODE,  0x00000000);
6175         return 0;
6176 }
6177
6178
6179 /* tp->lock is held. */
6180 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6181 {
6182         u32 addr_high, addr_low;
6183         int i;
6184
6185         addr_high = ((tp->dev->dev_addr[0] << 8) |
6186                      tp->dev->dev_addr[1]);
6187         addr_low = ((tp->dev->dev_addr[2] << 24) |
6188                     (tp->dev->dev_addr[3] << 16) |
6189                     (tp->dev->dev_addr[4] <<  8) |
6190                     (tp->dev->dev_addr[5] <<  0));
6191         for (i = 0; i < 4; i++) {
6192                 if (i == 1 && skip_mac_1)
6193                         continue;
6194                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6195                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6196         }
6197
6198         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6199             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6200                 for (i = 0; i < 12; i++) {
6201                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6202                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6203                 }
6204         }
6205
6206         addr_high = (tp->dev->dev_addr[0] +
6207                      tp->dev->dev_addr[1] +
6208                      tp->dev->dev_addr[2] +
6209                      tp->dev->dev_addr[3] +
6210                      tp->dev->dev_addr[4] +
6211                      tp->dev->dev_addr[5]) &
6212                 TX_BACKOFF_SEED_MASK;
6213         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6214 }
6215
6216 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6217 {
6218         struct tg3 *tp = netdev_priv(dev);
6219         struct sockaddr *addr = p;
6220         int err = 0, skip_mac_1 = 0;
6221
6222         if (!is_valid_ether_addr(addr->sa_data))
6223                 return -EINVAL;
6224
6225         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6226
6227         if (!netif_running(dev))
6228                 return 0;
6229
6230         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6231                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6232
6233                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6234                 addr0_low = tr32(MAC_ADDR_0_LOW);
6235                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6236                 addr1_low = tr32(MAC_ADDR_1_LOW);
6237
6238                 /* Skip MAC addr 1 if ASF is using it. */
6239                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6240                     !(addr1_high == 0 && addr1_low == 0))
6241                         skip_mac_1 = 1;
6242         }
6243         spin_lock_bh(&tp->lock);
6244         __tg3_set_mac_addr(tp, skip_mac_1);
6245         spin_unlock_bh(&tp->lock);
6246
6247         return err;
6248 }
6249
6250 /* tp->lock is held. */
6251 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6252                            dma_addr_t mapping, u32 maxlen_flags,
6253                            u32 nic_addr)
6254 {
6255         tg3_write_mem(tp,
6256                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6257                       ((u64) mapping >> 32));
6258         tg3_write_mem(tp,
6259                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6260                       ((u64) mapping & 0xffffffff));
6261         tg3_write_mem(tp,
6262                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6263                        maxlen_flags);
6264
6265         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6266                 tg3_write_mem(tp,
6267                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6268                               nic_addr);
6269 }
6270
6271 static void __tg3_set_rx_mode(struct net_device *);
6272 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6273 {
6274         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6275         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6276         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6277         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6278         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6279                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6280                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6281         }
6282         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6283         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6284         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6285                 u32 val = ec->stats_block_coalesce_usecs;
6286
6287                 if (!netif_carrier_ok(tp->dev))
6288                         val = 0;
6289
6290                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6291         }
6292 }
6293
6294 /* tp->lock is held. */
6295 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6296 {
6297         u32 val, rdmac_mode;
6298         int i, err, limit;
6299
6300         tg3_disable_ints(tp);
6301
6302         tg3_stop_fw(tp);
6303
6304         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6305
6306         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6307                 tg3_abort_hw(tp, 1);
6308         }
6309
6310         if (reset_phy)
6311                 tg3_phy_reset(tp);
6312
6313         err = tg3_chip_reset(tp);
6314         if (err)
6315                 return err;
6316
6317         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6318
6319         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0) {
6320                 val = tr32(TG3_CPMU_CTRL);
6321                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6322                 tw32(TG3_CPMU_CTRL, val);
6323         }
6324
6325         /* This works around an issue with Athlon chipsets on
6326          * B3 tigon3 silicon.  This bit has no effect on any
6327          * other revision.  But do not set this on PCI Express
6328          * chips and don't even touch the clocks if the CPMU is present.
6329          */
6330         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6331                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6332                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6333                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6334         }
6335
6336         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6337             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6338                 val = tr32(TG3PCI_PCISTATE);
6339                 val |= PCISTATE_RETRY_SAME_DMA;
6340                 tw32(TG3PCI_PCISTATE, val);
6341         }
6342
6343         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6344                 /* Allow reads and writes to the
6345                  * APE register and memory space.
6346                  */
6347                 val = tr32(TG3PCI_PCISTATE);
6348                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6349                        PCISTATE_ALLOW_APE_SHMEM_WR;
6350                 tw32(TG3PCI_PCISTATE, val);
6351         }
6352
6353         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6354                 /* Enable some hw fixes.  */
6355                 val = tr32(TG3PCI_MSI_DATA);
6356                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6357                 tw32(TG3PCI_MSI_DATA, val);
6358         }
6359
6360         /* Descriptor ring init may make accesses to the
6361          * NIC SRAM area to setup the TX descriptors, so we
6362          * can only do this after the hardware has been
6363          * successfully reset.
6364          */
6365         err = tg3_init_rings(tp);
6366         if (err)
6367                 return err;
6368
6369         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6370             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6371                 /* This value is determined during the probe time DMA
6372                  * engine test, tg3_test_dma.
6373                  */
6374                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6375         }
6376
6377         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6378                           GRC_MODE_4X_NIC_SEND_RINGS |
6379                           GRC_MODE_NO_TX_PHDR_CSUM |
6380                           GRC_MODE_NO_RX_PHDR_CSUM);
6381         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6382
6383         /* Pseudo-header checksum is done by hardware logic and not
6384          * the offload processers, so make the chip do the pseudo-
6385          * header checksums on receive.  For transmit it is more
6386          * convenient to do the pseudo-header checksum in software
6387          * as Linux does that on transmit for us in all cases.
6388          */
6389         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6390
6391         tw32(GRC_MODE,
6392              tp->grc_mode |
6393              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6394
6395         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6396         val = tr32(GRC_MISC_CFG);
6397         val &= ~0xff;
6398         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6399         tw32(GRC_MISC_CFG, val);
6400
6401         /* Initialize MBUF/DESC pool. */
6402         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6403                 /* Do nothing.  */
6404         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6405                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6406                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6407                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6408                 else
6409                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6410                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6411                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6412         }
6413         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6414                 int fw_len;
6415
6416                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6417                           TG3_TSO5_FW_RODATA_LEN +
6418                           TG3_TSO5_FW_DATA_LEN +
6419                           TG3_TSO5_FW_SBSS_LEN +
6420                           TG3_TSO5_FW_BSS_LEN);
6421                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6422                 tw32(BUFMGR_MB_POOL_ADDR,
6423                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6424                 tw32(BUFMGR_MB_POOL_SIZE,
6425                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6426         }
6427
6428         if (tp->dev->mtu <= ETH_DATA_LEN) {
6429                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6430                      tp->bufmgr_config.mbuf_read_dma_low_water);
6431                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6432                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6433                 tw32(BUFMGR_MB_HIGH_WATER,
6434                      tp->bufmgr_config.mbuf_high_water);
6435         } else {
6436                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6437                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6438                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6439                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6440                 tw32(BUFMGR_MB_HIGH_WATER,
6441                      tp->bufmgr_config.mbuf_high_water_jumbo);
6442         }
6443         tw32(BUFMGR_DMA_LOW_WATER,
6444              tp->bufmgr_config.dma_low_water);
6445         tw32(BUFMGR_DMA_HIGH_WATER,
6446              tp->bufmgr_config.dma_high_water);
6447
6448         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6449         for (i = 0; i < 2000; i++) {
6450                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6451                         break;
6452                 udelay(10);
6453         }
6454         if (i >= 2000) {
6455                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6456                        tp->dev->name);
6457                 return -ENODEV;
6458         }
6459
6460         /* Setup replenish threshold. */
6461         val = tp->rx_pending / 8;
6462         if (val == 0)
6463                 val = 1;
6464         else if (val > tp->rx_std_max_post)
6465                 val = tp->rx_std_max_post;
6466         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6467                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6468                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6469
6470                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6471                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6472         }
6473
6474         tw32(RCVBDI_STD_THRESH, val);
6475
6476         /* Initialize TG3_BDINFO's at:
6477          *  RCVDBDI_STD_BD:     standard eth size rx ring
6478          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6479          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6480          *
6481          * like so:
6482          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6483          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6484          *                              ring attribute flags
6485          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6486          *
6487          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6488          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6489          *
6490          * The size of each ring is fixed in the firmware, but the location is
6491          * configurable.
6492          */
6493         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6494              ((u64) tp->rx_std_mapping >> 32));
6495         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6496              ((u64) tp->rx_std_mapping & 0xffffffff));
6497         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6498              NIC_SRAM_RX_BUFFER_DESC);
6499
6500         /* Don't even try to program the JUMBO/MINI buffer descriptor
6501          * configs on 5705.
6502          */
6503         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6504                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6505                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6506         } else {
6507                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6508                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6509
6510                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6511                      BDINFO_FLAGS_DISABLED);
6512
6513                 /* Setup replenish threshold. */
6514                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6515
6516                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6517                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6518                              ((u64) tp->rx_jumbo_mapping >> 32));
6519                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6520                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6521                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6522                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6523                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6524                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6525                 } else {
6526                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6527                              BDINFO_FLAGS_DISABLED);
6528                 }
6529
6530         }
6531
6532         /* There is only one send ring on 5705/5750, no need to explicitly
6533          * disable the others.
6534          */
6535         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6536                 /* Clear out send RCB ring in SRAM. */
6537                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6538                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6539                                       BDINFO_FLAGS_DISABLED);
6540         }
6541
6542         tp->tx_prod = 0;
6543         tp->tx_cons = 0;
6544         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6545         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6546
6547         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6548                        tp->tx_desc_mapping,
6549                        (TG3_TX_RING_SIZE <<
6550                         BDINFO_FLAGS_MAXLEN_SHIFT),
6551                        NIC_SRAM_TX_BUFFER_DESC);
6552
6553         /* There is only one receive return ring on 5705/5750, no need
6554          * to explicitly disable the others.
6555          */
6556         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6557                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6558                      i += TG3_BDINFO_SIZE) {
6559                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6560                                       BDINFO_FLAGS_DISABLED);
6561                 }
6562         }
6563
6564         tp->rx_rcb_ptr = 0;
6565         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6566
6567         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6568                        tp->rx_rcb_mapping,
6569                        (TG3_RX_RCB_RING_SIZE(tp) <<
6570                         BDINFO_FLAGS_MAXLEN_SHIFT),
6571                        0);
6572
6573         tp->rx_std_ptr = tp->rx_pending;
6574         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6575                      tp->rx_std_ptr);
6576
6577         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6578                                                 tp->rx_jumbo_pending : 0;
6579         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6580                      tp->rx_jumbo_ptr);
6581
6582         /* Initialize MAC address and backoff seed. */
6583         __tg3_set_mac_addr(tp, 0);
6584
6585         /* MTU + ethernet header + FCS + optional VLAN tag */
6586         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6587
6588         /* The slot time is changed by tg3_setup_phy if we
6589          * run at gigabit with half duplex.
6590          */
6591         tw32(MAC_TX_LENGTHS,
6592              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6593              (6 << TX_LENGTHS_IPG_SHIFT) |
6594              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6595
6596         /* Receive rules. */
6597         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6598         tw32(RCVLPC_CONFIG, 0x0181);
6599
6600         /* Calculate RDMAC_MODE setting early, we need it to determine
6601          * the RCVLPC_STATE_ENABLE mask.
6602          */
6603         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6604                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6605                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6606                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6607                       RDMAC_MODE_LNGREAD_ENAB);
6608
6609         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6610                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6611                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6612                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6613
6614         /* If statement applies to 5705 and 5750 PCI devices only */
6615         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6616              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6617             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6618                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6619                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6620                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6621                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6622                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6623                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6624                 }
6625         }
6626
6627         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6628                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6629
6630         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6631                 rdmac_mode |= (1 << 27);
6632
6633         /* Receive/send statistics. */
6634         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6635                 val = tr32(RCVLPC_STATS_ENABLE);
6636                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6637                 tw32(RCVLPC_STATS_ENABLE, val);
6638         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6639                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6640                 val = tr32(RCVLPC_STATS_ENABLE);
6641                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6642                 tw32(RCVLPC_STATS_ENABLE, val);
6643         } else {
6644                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6645         }
6646         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6647         tw32(SNDDATAI_STATSENAB, 0xffffff);
6648         tw32(SNDDATAI_STATSCTRL,
6649              (SNDDATAI_SCTRL_ENABLE |
6650               SNDDATAI_SCTRL_FASTUPD));
6651
6652         /* Setup host coalescing engine. */
6653         tw32(HOSTCC_MODE, 0);
6654         for (i = 0; i < 2000; i++) {
6655                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6656                         break;
6657                 udelay(10);
6658         }
6659
6660         __tg3_set_coalesce(tp, &tp->coal);
6661
6662         /* set status block DMA address */
6663         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6664              ((u64) tp->status_mapping >> 32));
6665         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6666              ((u64) tp->status_mapping & 0xffffffff));
6667
6668         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6669                 /* Status/statistics block address.  See tg3_timer,
6670                  * the tg3_periodic_fetch_stats call there, and
6671                  * tg3_get_stats to see how this works for 5705/5750 chips.
6672                  */
6673                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6674                      ((u64) tp->stats_mapping >> 32));
6675                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6676                      ((u64) tp->stats_mapping & 0xffffffff));
6677                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6678                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6679         }
6680
6681         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6682
6683         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6684         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6685         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6686                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6687
6688         /* Clear statistics/status block in chip, and status block in ram. */
6689         for (i = NIC_SRAM_STATS_BLK;
6690              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6691              i += sizeof(u32)) {
6692                 tg3_write_mem(tp, i, 0);
6693                 udelay(40);
6694         }
6695         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6696
6697         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6698                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6699                 /* reset to prevent losing 1st rx packet intermittently */
6700                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6701                 udelay(10);
6702         }
6703
6704         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6705                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6706         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6707             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6708             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6709                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6710         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6711         udelay(40);
6712
6713         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6714          * If TG3_FLG2_IS_NIC is zero, we should read the
6715          * register to preserve the GPIO settings for LOMs. The GPIOs,
6716          * whether used as inputs or outputs, are set by boot code after
6717          * reset.
6718          */
6719         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6720                 u32 gpio_mask;
6721
6722                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6723                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6724                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6725
6726                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6727                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6728                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6729
6730                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6731                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6732
6733                 tp->grc_local_ctrl &= ~gpio_mask;
6734                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6735
6736                 /* GPIO1 must be driven high for eeprom write protect */
6737                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6738                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6739                                                GRC_LCLCTRL_GPIO_OUTPUT1);
6740         }
6741         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6742         udelay(100);
6743
6744         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6745         tp->last_tag = 0;
6746
6747         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6748                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6749                 udelay(40);
6750         }
6751
6752         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6753                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6754                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6755                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6756                WDMAC_MODE_LNGREAD_ENAB);
6757
6758         /* If statement applies to 5705 and 5750 PCI devices only */
6759         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6760              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6761             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6762                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6763                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6764                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6765                         /* nothing */
6766                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6767                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6768                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6769                         val |= WDMAC_MODE_RX_ACCEL;
6770                 }
6771         }
6772
6773         /* Enable host coalescing bug fix */
6774         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6775             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
6776             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
6777             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
6778                 val |= (1 << 29);
6779
6780         tw32_f(WDMAC_MODE, val);
6781         udelay(40);
6782
6783         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6784                 u16 pcix_cmd;
6785
6786                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6787                                      &pcix_cmd);
6788                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6789                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
6790                         pcix_cmd |= PCI_X_CMD_READ_2K;
6791                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6792                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
6793                         pcix_cmd |= PCI_X_CMD_READ_2K;
6794                 }
6795                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6796                                       pcix_cmd);
6797         }
6798
6799         tw32_f(RDMAC_MODE, rdmac_mode);
6800         udelay(40);
6801
6802         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6803         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6804                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6805
6806         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6807                 tw32(SNDDATAC_MODE,
6808                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
6809         else
6810                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6811
6812         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6813         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6814         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6815         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6816         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6817                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6818         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6819         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6820
6821         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6822                 err = tg3_load_5701_a0_firmware_fix(tp);
6823                 if (err)
6824                         return err;
6825         }
6826
6827         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6828                 err = tg3_load_tso_firmware(tp);
6829                 if (err)
6830                         return err;
6831         }
6832
6833         tp->tx_mode = TX_MODE_ENABLE;
6834         tw32_f(MAC_TX_MODE, tp->tx_mode);
6835         udelay(100);
6836
6837         tp->rx_mode = RX_MODE_ENABLE;
6838         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
6839             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6840                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6841
6842         tw32_f(MAC_RX_MODE, tp->rx_mode);
6843         udelay(10);
6844
6845         if (tp->link_config.phy_is_low_power) {
6846                 tp->link_config.phy_is_low_power = 0;
6847                 tp->link_config.speed = tp->link_config.orig_speed;
6848                 tp->link_config.duplex = tp->link_config.orig_duplex;
6849                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6850         }
6851
6852         tp->mi_mode = MAC_MI_MODE_BASE;
6853         tw32_f(MAC_MI_MODE, tp->mi_mode);
6854         udelay(80);
6855
6856         tw32(MAC_LED_CTRL, tp->led_ctrl);
6857
6858         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6859         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6860                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6861                 udelay(10);
6862         }
6863         tw32_f(MAC_RX_MODE, tp->rx_mode);
6864         udelay(10);
6865
6866         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6867                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6868                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6869                         /* Set drive transmission level to 1.2V  */
6870                         /* only if the signal pre-emphasis bit is not set  */
6871                         val = tr32(MAC_SERDES_CFG);
6872                         val &= 0xfffff000;
6873                         val |= 0x880;
6874                         tw32(MAC_SERDES_CFG, val);
6875                 }
6876                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6877                         tw32(MAC_SERDES_CFG, 0x616000);
6878         }
6879
6880         /* Prevent chip from dropping frames when flow control
6881          * is enabled.
6882          */
6883         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6884
6885         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6886             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6887                 /* Use hardware link auto-negotiation */
6888                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6889         }
6890
6891         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6892             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6893                 u32 tmp;
6894
6895                 tmp = tr32(SERDES_RX_CTRL);
6896                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6897                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6898                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6899                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6900         }
6901
6902         err = tg3_setup_phy(tp, 0);
6903         if (err)
6904                 return err;
6905
6906         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6907             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
6908                 u32 tmp;
6909
6910                 /* Clear CRC stats. */
6911                 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
6912                         tg3_writephy(tp, MII_TG3_TEST1,
6913                                      tmp | MII_TG3_TEST1_CRC_EN);
6914                         tg3_readphy(tp, 0x14, &tmp);
6915                 }
6916         }
6917
6918         __tg3_set_rx_mode(tp->dev);
6919
6920         /* Initialize receive rules. */
6921         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6922         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6923         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6924         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6925
6926         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6927             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6928                 limit = 8;
6929         else
6930                 limit = 16;
6931         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6932                 limit -= 4;
6933         switch (limit) {
6934         case 16:
6935                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6936         case 15:
6937                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6938         case 14:
6939                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6940         case 13:
6941                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6942         case 12:
6943                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6944         case 11:
6945                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6946         case 10:
6947                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6948         case 9:
6949                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6950         case 8:
6951                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6952         case 7:
6953                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6954         case 6:
6955                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6956         case 5:
6957                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6958         case 4:
6959                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6960         case 3:
6961                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6962         case 2:
6963         case 1:
6964
6965         default:
6966                 break;
6967         };
6968
6969         /* Write our heartbeat update interval to APE. */
6970         tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
6971                         APE_HOST_HEARTBEAT_INT_DISABLE);
6972
6973         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6974
6975         return 0;
6976 }
6977
6978 /* Called at device open time to get the chip ready for
6979  * packet processing.  Invoked with tp->lock held.
6980  */
6981 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6982 {
6983         int err;
6984
6985         /* Force the chip into D0. */
6986         err = tg3_set_power_state(tp, PCI_D0);
6987         if (err)
6988                 goto out;
6989
6990         tg3_switch_clocks(tp);
6991
6992         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6993
6994         err = tg3_reset_hw(tp, reset_phy);
6995
6996 out:
6997         return err;
6998 }
6999
/* Accumulate the 32-bit value read from hardware register REG into the
 * 64-bit statistics slot PSTAT (a high/low pair).  Unsigned wraparound
 * of the low word is detected by the sum coming out smaller than the
 * addend, in which case the high word is carried.
 *
 * NOTE(review): correctness of repeated accumulation assumes the stat
 * registers reset on read (or that callers account for it) — the
 * register semantics are not visible here; confirm against the chip
 * documentation.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
7006
7007 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7008 {
7009         struct tg3_hw_stats *sp = tp->hw_stats;
7010
7011         if (!netif_carrier_ok(tp->dev))
7012                 return;
7013
7014         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7015         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7016         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7017         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7018         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7019         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7020         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7021         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7022         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7023         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7024         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7025         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7026         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7027
7028         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7029         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7030         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7031         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7032         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7033         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7034         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7035         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7036         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7037         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7038         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7039         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7040         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7041         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7042
7043         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7044         TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7045         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7046 }
7047
/* Periodic driver timer, re-armed every tp->timer_offset jiffies.
 * Per tick it runs the non-tagged-status interrupt workaround; once
 * per second it polls link state and fetches statistics; once every
 * two seconds it sends the ASF heartbeat.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* An interrupt-sync operation is in progress; skip this tick
	 * entirely and just re-arm the timer.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Status block says there is work pending: poke the
			 * chip to raise the interrupt again. */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Force an immediate coalescing event. */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* Write-DMA engine stopped unexpectedly: schedule a full
		 * chip reset from process context and bail out without
		 * re-arming here (reset_task restarts the timer).
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link up but the MAC reports a state change, or
			 * link down but the PCS has (re)synced: redo PHY
			 * setup either way.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the port mode bits to force the
				 * SERDES to re-sync; skipped while
				 * serdes_counter is still counting down.
				 */
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
7167
7168 static int tg3_request_irq(struct tg3 *tp)
7169 {
7170         irq_handler_t fn;
7171         unsigned long flags;
7172         struct net_device *dev = tp->dev;
7173
7174         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7175                 fn = tg3_msi;
7176                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7177                         fn = tg3_msi_1shot;
7178                 flags = IRQF_SAMPLE_RANDOM;
7179         } else {
7180                 fn = tg3_interrupt;
7181                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7182                         fn = tg3_interrupt_tagged;
7183                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7184         }
7185         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7186 }
7187
/* Verify that the chip can actually deliver an interrupt.
 *
 * Temporarily replaces the driver ISR with tg3_test_isr, forces an
 * immediate host-coalescing event, and polls for up to ~50ms for
 * evidence that the interrupt fired (non-zero interrupt mailbox, or
 * PCI interrupts masked in MISC_HOST_CTRL).  The normal handler is
 * re-installed via tg3_request_irq() before returning.
 *
 * Returns 0 if an interrupt was observed, -EIO if not, or a negative
 * errno from request_irq.  NOTE(review): on a request_irq failure the
 * device is left with no handler installed; callers treat any
 * non-zero return as fatal and tear down.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Kick the coalescing engine so an interrupt is generated now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* Either signature proves the test ISR ran. */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tp->pdev->irq, dev);

	/* Restore the normal interrupt handler. */
	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (intr_ok)
		return 0;

	return -EIO;
}
7241
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.
 *
 * Runs tg3_test_interrupt() with SERR reporting suppressed (an MSI
 * cycle may terminate with Master Abort on broken chipsets).  If no
 * MSI interrupt is seen, falls back to INTx mode and resets the chip
 * to clear any Master Abort state.  Any other error is propagated.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	/* Chip re-init failed: drop the IRQ we just acquired so the
	 * caller's error path does not need to know about it. */
	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
7302
/* net_device open callback: power up the chip, allocate DMA rings,
 * install the interrupt handler (preferring MSI where supported),
 * initialize the hardware, start the driver timer, and enable
 * interrupts.  Every failure path unwinds whatever was set up before
 * it.  Returns 0 on success or a negative errno.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	netif_carrier_off(tp->dev);

	tg3_full_lock(tp, 0);

	/* Bring the device to full power before touching it. */
	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		tg3_full_unlock(tp);
		return err;
	}

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			/* Hardware bug - MSI won't work if INTX disabled. */
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				pci_intx(tp->pdev, 1);

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	napi_enable(&tp->napi);

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged status lets the timer run at 1Hz; non-tagged
		 * status needs the 10Hz workaround tick (see tg3_timer).
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		/* timer_multiplier ticks = 1 second; asf_multiplier
		 * ticks = 2 seconds (the ASF heartbeat period). */
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		napi_disable(&tp->napi);
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		/* Confirm MSI actually works; tg3_test_msi() falls back
		 * to INTx itself, so a failure here is unrecoverable. */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			napi_disable(&tp->napi);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
7442
#if 0
/* Debug-only register/state dump, compiled out by default.  Prints
 * the major hardware control blocks, the host status and statistics
 * blocks, the mailboxes, and the first few NIC-side TX/RX descriptors
 * to the kernel log.  Intended to be enabled by hand when diagnosing
 * chip lockups (see the commented-out call in tg3_close()).
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
7670
7671 static struct net_device_stats *tg3_get_stats(struct net_device *);
7672 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7673
/* net_device stop callback: quiesce NAPI and the reset worker, stop
 * the TX queue and driver timer, halt the chip, release the IRQ (and
 * MSI), snapshot the statistics so they survive the DMA-memory free,
 * release the rings, and power the device down.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Stop software activity before touching the hardware. */
	napi_disable(&tp->napi);
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Preserve cumulative counters across the close; tg3_get_stats
	 * and tg3_get_estats add the hardware deltas on top of these
	 * snapshots, and hw_stats is about to be freed. */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7717
7718 static inline unsigned long get_stat64(tg3_stat64_t *val)
7719 {
7720         unsigned long ret;
7721
7722 #if (BITS_PER_LONG == 32)
7723         ret = val->low;
7724 #else
7725         ret = ((u64)val->high << 32) | ((u64)val->low);
7726 #endif
7727         return ret;
7728 }
7729
/* Return the cumulative receive CRC error count.
 *
 * On 5700/5701 with a copper PHY the count is read from the PHY:
 * MII_TG3_TEST1 is written with CRC_EN and the counter is then read
 * from register 0x14, accumulated into tp->phy_crc_errors (the PHY
 * counter presumably clears on read -- NOTE(review): confirm against
 * the PHY datasheet).  All other configurations use the MAC's
 * rx_fcs_errors hardware statistic directly.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		/* MDIO access must be serialized against the irq path. */
		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7755
/* Add the live hardware counter for 'member' to the snapshot taken at
 * the last close (estats_prev), yielding a cumulative total. */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Build the cumulative ethtool statistics in tp->estats: each field is
 * the pre-close snapshot plus the current hardware counter.  If the
 * statistics DMA block is not allocated (device closed), the snapshot
 * alone is returned.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7847
/* net_device get_stats callback: map the hardware statistics block
 * onto struct net_device_stats, adding each counter to the snapshot
 * preserved at the last close (net_stats_prev) so totals accumulate
 * across open/close cycles.  Returns the snapshot alone when the
 * statistics DMA block is not allocated.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* The hardware keeps unicast/multicast/broadcast separately. */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on 5700/5701 (see
	 * calc_crc_errors) rather than the MAC statistics block. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
7907
7908 static inline u32 calc_crc(unsigned char *buf, int len)
7909 {
7910         u32 reg;
7911         u32 tmp;
7912         int j, k;
7913
7914         reg = 0xffffffff;
7915
7916         for (j = 0; j < len; j++) {
7917                 reg ^= buf[j];
7918
7919                 for (k = 0; k < 8; k++) {
7920                         tmp = reg & 0x01;
7921
7922                         reg >>= 1;
7923
7924                         if (tmp) {
7925                                 reg ^= 0xedb88320;
7926                         }
7927                 }
7928         }
7929
7930         return ~reg;
7931 }
7932
7933 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7934 {
7935         /* accept or reject all multicast frames */
7936         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7937         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7938         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7939         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7940 }
7941
/* Program the MAC receive filters (promiscuous bit, VLAN tag keeping
 * and the 128-bit multicast hash) from dev->flags and the device's
 * multicast list.  Callers take the full device lock first (see
 * tg3_set_rx_mode()).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 rx_mode;

        rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
                                  RX_MODE_KEEP_VLAN_TAG);

        /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
         * flag clear.
         */
#if TG3_VLAN_TAG_USED
        if (!tp->vlgrp &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
                rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
        /* By definition, VLAN is disabled always in this
         * case.
         */
        if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
                rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= RX_MODE_PROMISC;
        } else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast. */
                tg3_set_multi (tp, 1);
        } else if (dev->mc_count < 1) {
                /* Reject all multicast. */
                tg3_set_multi (tp, 0);
        } else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                unsigned int i;
                u32 mc_filter[4] = { 0, };
                u32 regidx;
                u32 bit;
                u32 crc;

                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        /* Hash on the low 7 bits of the inverted CRC:
                         * bits 6:5 select one of the four hash
                         * registers, bits 4:0 the bit within it.
                         */
                        crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
                        bit = ~crc & 0x7f;
                        regidx = (bit & 0x60) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                tw32(MAC_HASH_REG_0, mc_filter[0]);
                tw32(MAC_HASH_REG_1, mc_filter[1]);
                tw32(MAC_HASH_REG_2, mc_filter[2]);
                tw32(MAC_HASH_REG_3, mc_filter[3]);
        }

        /* Only touch the RX mode register if something changed; the
         * flushed write (tw32_f) is followed by a short settle delay.
         */
        if (rx_mode != tp->rx_mode) {
                tp->rx_mode = rx_mode;
                tw32_f(MAC_RX_MODE, rx_mode);
                udelay(10);
        }
}
8005
/* netdev callback: reprogram the RX filters under the full device
 * lock.  Nothing to do while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        if (netif_running(dev)) {
                tg3_full_lock(tp, 0);
                __tg3_set_rx_mode(dev);
                tg3_full_unlock(tp);
        }
}
8017
/* Size of the register dump image produced by tg3_get_regs(). */
#define TG3_REGDUMP_LEN         (32 * 1024)

/* ethtool: report the register dump buffer size. */
static int tg3_get_regs_len(struct net_device *dev)
{
        return TG3_REGDUMP_LEN;
}
8024
/* ethtool: dump chip registers into the 32K buffer at *_p.
 *
 * The buffer is an image of the register space: each value is stored
 * at its own hardware offset within the dump (see the
 * "p = (u32 *)(orig_p + ...)" in the helper macros), and the gaps
 * between dumped ranges stay zero from the initial memset.  Registers
 * are not read while the PHY is in low-power state.
 */
static void tg3_get_regs(struct net_device *dev,
                struct ethtool_regs *regs, void *_p)
{
        u32 *p = _p;
        struct tg3 *tp = netdev_priv(dev);
        u8 *orig_p = _p;
        int i;

        regs->version = 0;

        memset(p, 0, TG3_REGDUMP_LEN);

        if (tp->link_config.phy_is_low_power)
                return;

        tg3_full_lock(tp, 0);

/* Read one register and advance the output cursor. */
#define __GET_REG32(reg)        (*(p)++ = tr32(reg))
/* Dump `len' bytes of registers starting at `base', placed at offset
 * `base' within the output image.
 */
#define GET_REG32_LOOP(base,len)                \
do {    p = (u32 *)(orig_p + (base));           \
        for (i = 0; i < len; i += 4)            \
                __GET_REG32((base) + i);        \
} while (0)
/* Dump a single register at its own offset within the output image. */
#define GET_REG32_1(reg)                        \
do {    p = (u32 *)(orig_p + (reg));            \
        __GET_REG32((reg));                     \
} while (0)

        GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
        GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
        GET_REG32_LOOP(MAC_MODE, 0x4f0);
        GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
        GET_REG32_1(SNDDATAC_MODE);
        GET_REG32_LOOP(SNDBDS_MODE, 0x80);
        GET_REG32_LOOP(SNDBDI_MODE, 0x48);
        GET_REG32_1(SNDBDC_MODE);
        GET_REG32_LOOP(RCVLPC_MODE, 0x20);
        GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
        GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
        GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
        GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
        GET_REG32_1(RCVDCC_MODE);
        GET_REG32_LOOP(RCVBDI_MODE, 0x20);
        GET_REG32_LOOP(RCVCC_MODE, 0x14);
        GET_REG32_LOOP(RCVLSC_MODE, 0x08);
        GET_REG32_1(MBFREE_MODE);
        GET_REG32_LOOP(HOSTCC_MODE, 0x100);
        GET_REG32_LOOP(MEMARB_MODE, 0x10);
        GET_REG32_LOOP(BUFMGR_MODE, 0x58);
        GET_REG32_LOOP(RDMAC_MODE, 0x08);
        GET_REG32_LOOP(WDMAC_MODE, 0x08);
        GET_REG32_1(RX_CPU_MODE);
        GET_REG32_1(RX_CPU_STATE);
        GET_REG32_1(RX_CPU_PGMCTR);
        GET_REG32_1(RX_CPU_HWBKPT);
        GET_REG32_1(TX_CPU_MODE);
        GET_REG32_1(TX_CPU_STATE);
        GET_REG32_1(TX_CPU_PGMCTR);
        GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
        GET_REG32_LOOP(FTQ_RESET, 0x120);
        GET_REG32_LOOP(MSGINT_MODE, 0x0c);
        GET_REG32_1(DMAC_MODE);
        GET_REG32_LOOP(GRC_MODE, 0x4c);
        /* NVRAM registers are only meaningful when NVRAM is present. */
        if (tp->tg3_flags & TG3_FLAG_NVRAM)
                GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

        tg3_full_unlock(tp);
}
8097
/* ethtool: size in bytes of the NVRAM exposed via get/set_eeprom. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        return tp->nvram_size;
}
8104
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);

/* ethtool: read eeprom->len bytes of NVRAM at eeprom->offset into
 * `data'.
 *
 * NVRAM is only readable as aligned 32-bit words, so the request is
 * split into an unaligned head, whole words, and an unaligned tail.
 * Each word goes through cpu_to_le32() before the byte copy —
 * presumably so the bytes land in their on-NVRAM order; verify on a
 * big-endian host.  eeprom->len tracks bytes copied so far, so a
 * partial count is reported even when a mid-run read fails.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        int ret;
        u8  *pd;
        u32 i, offset, len, val, b_offset, b_count;

        /* NVRAM is not accessible while the PHY is powered down. */
        if (tp->link_config.phy_is_low_power)
                return -EAGAIN;

        offset = eeprom->offset;
        len = eeprom->len;
        eeprom->len = 0;

        eeprom->magic = TG3_EEPROM_MAGIC;

        if (offset & 3) {
                /* adjustments to start on required 4 byte boundary */
                b_offset = offset & 3;
                b_count = 4 - b_offset;
                if (b_count > len) {
                        /* i.e. offset=1 len=2 */
                        b_count = len;
                }
                ret = tg3_nvram_read(tp, offset-b_offset, &val);
                if (ret)
                        return ret;
                val = cpu_to_le32(val);
                memcpy(data, ((char*)&val) + b_offset, b_count);
                len -= b_count;
                offset += b_count;
                eeprom->len += b_count;
        }

        /* read bytes upto the last 4 byte boundary */
        pd = &data[eeprom->len];
        for (i = 0; i < (len - (len & 3)); i += 4) {
                ret = tg3_nvram_read(tp, offset + i, &val);
                if (ret) {
                        eeprom->len += i;
                        return ret;
                }
                val = cpu_to_le32(val);
                memcpy(pd + i, &val, 4);
        }
        eeprom->len += i;

        if (len & 3) {
                /* read last bytes not ending on 4 byte boundary */
                pd = &data[eeprom->len];
                b_count = len & 3;
                b_offset = offset + len - b_count;
                ret = tg3_nvram_read(tp, b_offset, &val);
                if (ret)
                        return ret;
                val = cpu_to_le32(val);
                memcpy(pd, ((char*)&val), b_count);
                eeprom->len += b_count;
        }
        return 0;
}
8169
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);

/* ethtool: write eeprom->len bytes from `data' to NVRAM at
 * eeprom->offset.
 *
 * NVRAM writes are word-based, so when the request is not 4-byte
 * aligned at either end the bordering words are read back first
 * (read-modify-write) and a word-aligned temporary buffer is built.
 * The pad words are placed first and `data' is copied in last, so the
 * caller's bytes win wherever they overlap the padding.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        int ret;
        u32 offset, len, b_offset, odd_len, start, end;
        u8 *buf;

        /* NVRAM is not accessible while the PHY is powered down. */
        if (tp->link_config.phy_is_low_power)
                return -EAGAIN;

        if (eeprom->magic != TG3_EEPROM_MAGIC)
                return -EINVAL;

        offset = eeprom->offset;
        len = eeprom->len;

        if ((b_offset = (offset & 3))) {
                /* adjustments to start on required 4 byte boundary */
                ret = tg3_nvram_read(tp, offset-b_offset, &start);
                if (ret)
                        return ret;
                start = cpu_to_le32(start);
                len += b_offset;
                offset &= ~3;
                if (len < 4)
                        len = 4;
        }

        odd_len = 0;
        if (len & 3) {
                /* adjustments to end on required 4 byte boundary */
                odd_len = 1;
                len = (len + 3) & ~3;
                ret = tg3_nvram_read(tp, offset+len-4, &end);
                if (ret)
                        return ret;
                end = cpu_to_le32(end);
        }

        buf = data;
        if (b_offset || odd_len) {
                buf = kmalloc(len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                if (b_offset)
                        memcpy(buf, &start, 4);
                if (odd_len)
                        memcpy(buf+len-4, &end, 4);
                memcpy(buf + b_offset, data, eeprom->len);
        }

        ret = tg3_nvram_write_block(tp, offset, len, buf);

        /* Free the bounce buffer only if one was allocated. */
        if (buf != data)
                kfree(buf);

        return ret;
}
8230
8231 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8232 {
8233         struct tg3 *tp = netdev_priv(dev);
8234
8235         cmd->supported = (SUPPORTED_Autoneg);
8236
8237         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8238                 cmd->supported |= (SUPPORTED_1000baseT_Half |
8239                                    SUPPORTED_1000baseT_Full);
8240
8241         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8242                 cmd->supported |= (SUPPORTED_100baseT_Half |
8243                                   SUPPORTED_100baseT_Full |
8244                                   SUPPORTED_10baseT_Half |
8245                                   SUPPORTED_10baseT_Full |
8246                                   SUPPORTED_MII);
8247                 cmd->port = PORT_TP;
8248         } else {
8249                 cmd->supported |= SUPPORTED_FIBRE;
8250                 cmd->port = PORT_FIBRE;
8251         }
8252
8253         cmd->advertising = tp->link_config.advertising;
8254         if (netif_running(dev)) {
8255                 cmd->speed = tp->link_config.active_speed;
8256                 cmd->duplex = tp->link_config.active_duplex;
8257         }
8258         cmd->phy_address = PHY_ADDR;
8259         cmd->transceiver = 0;
8260         cmd->autoneg = tp->link_config.autoneg;
8261         cmd->maxtxpkt = 0;
8262         cmd->maxrxpkt = 0;
8263         return 0;
8264 }
8265
8266 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8267 {
8268         struct tg3 *tp = netdev_priv(dev);
8269
8270         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8271                 /* These are the only valid advertisement bits allowed.  */
8272                 if (cmd->autoneg == AUTONEG_ENABLE &&
8273                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8274                                           ADVERTISED_1000baseT_Full |
8275                                           ADVERTISED_Autoneg |
8276                                           ADVERTISED_FIBRE)))
8277                         return -EINVAL;
8278                 /* Fiber can only do SPEED_1000.  */
8279                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8280                          (cmd->speed != SPEED_1000))
8281                         return -EINVAL;
8282         /* Copper cannot force SPEED_1000.  */
8283         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8284                    (cmd->speed == SPEED_1000))
8285                 return -EINVAL;
8286         else if ((cmd->speed == SPEED_1000) &&
8287                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8288                 return -EINVAL;
8289
8290         tg3_full_lock(tp, 0);
8291
8292         tp->link_config.autoneg = cmd->autoneg;
8293         if (cmd->autoneg == AUTONEG_ENABLE) {
8294                 tp->link_config.advertising = (cmd->advertising |
8295                                               ADVERTISED_Autoneg);
8296                 tp->link_config.speed = SPEED_INVALID;
8297                 tp->link_config.duplex = DUPLEX_INVALID;
8298         } else {
8299                 tp->link_config.advertising = 0;
8300                 tp->link_config.speed = cmd->speed;
8301                 tp->link_config.duplex = cmd->duplex;
8302         }
8303
8304         tp->link_config.orig_speed = tp->link_config.speed;
8305         tp->link_config.orig_duplex = tp->link_config.duplex;
8306         tp->link_config.orig_autoneg = tp->link_config.autoneg;
8307
8308         if (netif_running(dev))
8309                 tg3_setup_phy(tp, 1);
8310
8311         tg3_full_unlock(tp);
8312
8313         return 0;
8314 }
8315
8316 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8317 {
8318         struct tg3 *tp = netdev_priv(dev);
8319
8320         strcpy(info->driver, DRV_MODULE_NAME);
8321         strcpy(info->version, DRV_MODULE_VERSION);
8322         strcpy(info->fw_version, tp->fw_ver);
8323         strcpy(info->bus_info, pci_name(tp->pdev));
8324 }
8325
8326 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8327 {
8328         struct tg3 *tp = netdev_priv(dev);
8329
8330         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8331                 wol->supported = WAKE_MAGIC;
8332         else
8333                 wol->supported = 0;
8334         wol->wolopts = 0;
8335         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8336                 wol->wolopts = WAKE_MAGIC;
8337         memset(&wol->sopass, 0, sizeof(wol->sopass));
8338 }
8339
8340 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8341 {
8342         struct tg3 *tp = netdev_priv(dev);
8343
8344         if (wol->wolopts & ~WAKE_MAGIC)
8345                 return -EINVAL;
8346         if ((wol->wolopts & WAKE_MAGIC) &&
8347             !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8348                 return -EINVAL;
8349
8350         spin_lock_bh(&tp->lock);
8351         if (wol->wolopts & WAKE_MAGIC)
8352                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8353         else
8354                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8355         spin_unlock_bh(&tp->lock);
8356
8357         return 0;
8358 }
8359
/* ethtool: return the driver's message-level bitmask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        return tp->msg_enable;
}
8365
/* ethtool: set the driver's message-level bitmask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
        struct tg3 *tp = netdev_priv(dev);
        tp->msg_enable = value;
}
8371
/* ethtool: enable/disable TCP segmentation offload.
 *
 * On chips without TSO capability the only accepted setting is "off".
 * Chips with the second-generation hardware TSO engine — except the
 * 5906 — also gain/lose IPv6 TSO together with the base setting, and
 * the 5761 additionally toggles TSO for ECN-marked frames.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
        struct tg3 *tp = netdev_priv(dev);

        if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
                if (value)
                        return -EINVAL;
                return 0;
        }
        if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
                if (value) {
                        dev->features |= NETIF_F_TSO6;
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                                dev->features |= NETIF_F_TSO_ECN;
                } else
                        dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
        }
        return ethtool_op_set_tso(dev, value);
}
8392
/* ethtool: restart autonegotiation on the PHY.
 *
 * Fails if the interface is down, if this is a SERDES device (no MII
 * PHY to program), or if autoneg is not currently enabled — unless
 * the link is in parallel-detect state, where restarting autoneg is
 * the recovery path.
 */
static int tg3_nway_reset(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 bmcr;
        int r;

        if (!netif_running(dev))
                return -EAGAIN;

        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
                return -EINVAL;

        spin_lock_bh(&tp->lock);
        r = -EINVAL;
        /* BMCR is read twice on purpose: the first (discarded) read
         * appears to flush a stale value.  NOTE(review): confirm
         * against PHY errata before "simplifying" this away.
         */
        tg3_readphy(tp, MII_BMCR, &bmcr);
        if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
            ((bmcr & BMCR_ANENABLE) ||
             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
                tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
                                           BMCR_ANENABLE);
                r = 0;
        }
        spin_unlock_bh(&tp->lock);

        return r;
}
8419
8420 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8421 {
8422         struct tg3 *tp = netdev_priv(dev);
8423
8424         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8425         ering->rx_mini_max_pending = 0;
8426         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8427                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8428         else
8429                 ering->rx_jumbo_max_pending = 0;
8430
8431         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8432
8433         ering->rx_pending = tp->rx_pending;
8434         ering->rx_mini_pending = 0;
8435         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8436                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8437         else
8438                 ering->rx_jumbo_pending = 0;
8439
8440         ering->tx_pending = tp->tx_pending;
8441 }
8442
/* ethtool: resize the RX, RX-jumbo and TX rings.
 *
 * Rejects sizes beyond the hardware ring limits and TX rings too small
 * to hold a maximally-fragmented skb — three times that margin on
 * chips with the TSO workaround flag (presumably because the
 * workaround can expand a packet into more descriptors; see the
 * TG3_FLG2_TSO_BUG paths).  If the interface is up, the chip is
 * halted and restarted so the new sizes take effect.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
        struct tg3 *tp = netdev_priv(dev);
        int irq_sync = 0, err = 0;

        if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
            (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
            (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
            (ering->tx_pending <= MAX_SKB_FRAGS) ||
            ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
             (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
                return -EINVAL;

        if (netif_running(dev)) {
                tg3_netif_stop(tp);
                irq_sync = 1;
        }

        tg3_full_lock(tp, irq_sync);

        tp->rx_pending = ering->rx_pending;

        /* Some chips can only post up to 64 standard RX descriptors. */
        if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
            tp->rx_pending > 63)
                tp->rx_pending = 63;
        tp->rx_jumbo_pending = ering->rx_jumbo_pending;
        tp->tx_pending = ering->tx_pending;

        if (netif_running(dev)) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                err = tg3_restart_hw(tp, 1);
                if (!err)
                        tg3_netif_start(tp);
        }

        tg3_full_unlock(tp);

        return err;
}
8482
8483 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8484 {
8485         struct tg3 *tp = netdev_priv(dev);
8486
8487         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8488         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8489         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8490 }
8491
8492 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8493 {
8494         struct tg3 *tp = netdev_priv(dev);
8495         int irq_sync = 0, err = 0;
8496
8497         if (netif_running(dev)) {
8498                 tg3_netif_stop(tp);
8499                 irq_sync = 1;
8500         }
8501
8502         tg3_full_lock(tp, irq_sync);
8503
8504         if (epause->autoneg)
8505                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8506         else
8507                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8508         if (epause->rx_pause)
8509                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8510         else
8511                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8512         if (epause->tx_pause)
8513                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8514         else
8515                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8516
8517         if (netif_running(dev)) {
8518                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8519                 err = tg3_restart_hw(tp, 1);
8520                 if (!err)
8521                         tg3_netif_start(tp);
8522         }
8523
8524         tg3_full_unlock(tp);
8525
8526         return err;
8527 }
8528
/* ethtool: report whether hardware RX checksumming is enabled. */
static u32 tg3_get_rx_csum(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
}
8534
8535 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8536 {
8537         struct tg3 *tp = netdev_priv(dev);
8538
8539         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8540                 if (data != 0)
8541                         return -EINVAL;
8542                 return 0;
8543         }
8544
8545         spin_lock_bh(&tp->lock);
8546         if (data)
8547                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8548         else
8549                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8550         spin_unlock_bh(&tp->lock);
8551
8552         return 0;
8553 }
8554
8555 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8556 {
8557         struct tg3 *tp = netdev_priv(dev);
8558
8559         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8560                 if (data != 0)
8561                         return -EINVAL;
8562                 return 0;
8563         }
8564
8565         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8566             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8567             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8568             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8569                 ethtool_op_set_tx_ipv6_csum(dev, data);
8570         else
8571                 ethtool_op_set_tx_csum(dev, data);
8572
8573         return 0;
8574 }
8575
8576 static int tg3_get_sset_count (struct net_device *dev, int sset)
8577 {
8578         switch (sset) {
8579         case ETH_SS_TEST:
8580                 return TG3_NUM_TEST;
8581         case ETH_SS_STATS:
8582                 return TG3_NUM_STATS;
8583         default:
8584                 return -EOPNOTSUPP;
8585         }
8586 }
8587
8588 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8589 {
8590         switch (stringset) {
8591         case ETH_SS_STATS:
8592                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8593                 break;
8594         case ETH_SS_TEST:
8595                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8596                 break;
8597         default:
8598                 WARN_ON(1);     /* we need a WARN() */
8599                 break;
8600         }
8601 }
8602
/* ethtool: identify the NIC by blinking its LEDs.
 *
 * Blinks for `data' seconds (2 if data == 0): every 500 ms the LED
 * control register alternates between "override, all speed LEDs on,
 * traffic blink" and "override, everything off".  The sleep is
 * interruptible so a signal stops the blinking early; the original
 * LED configuration is restored before returning.
 */
static int tg3_phys_id(struct net_device *dev, u32 data)
{
        struct tg3 *tp = netdev_priv(dev);
        int i;

        if (!netif_running(tp->dev))
                return -EAGAIN;

        if (data == 0)
                data = 2;

        for (i = 0; i < (data * 2); i++) {
                if ((i % 2) == 0)
                        tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
                                           LED_CTRL_1000MBPS_ON |
                                           LED_CTRL_100MBPS_ON |
                                           LED_CTRL_10MBPS_ON |
                                           LED_CTRL_TRAFFIC_OVERRIDE |
                                           LED_CTRL_TRAFFIC_BLINK |
                                           LED_CTRL_TRAFFIC_LED);

                else
                        tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
                                           LED_CTRL_TRAFFIC_OVERRIDE);

                if (msleep_interruptible(500))
                        break;
        }
        tw32(MAC_LED_CTRL, tp->led_ctrl);
        return 0;
}
8634
/* ethtool: refresh the extended statistics from hardware via
 * tg3_get_estats() and copy them into the caller's u64 array.
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
                                   struct ethtool_stats *estats, u64 *tmp_stats)
{
        struct tg3 *tp = netdev_priv(dev);
        memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
8641
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

/* Self-test: verify the NVRAM contents.
 *
 * The first NVRAM word's magic value selects the layout to check:
 *  - TG3_EEPROM_MAGIC (legacy image): CRC32 of the bootstrap header
 *    must match the word at 0x10, and CRC32 of the manufacturing
 *    block (0x74..0xfb) must match the word at 0xfc.
 *  - selfboot format-1 firmware image: the 8-bit sum over the whole
 *    image must be zero.
 *  - selfboot "HW" image: each data byte is protected by a stored
 *    parity bit; the bytes and parity bits are separated and odd
 *    parity is verified for each byte.
 * Returns 0 on success, -EIO on any mismatch or read failure,
 * -ENOMEM if the scratch buffer cannot be allocated.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
        u32 *buf, csum, magic;
        int i, j, k, err = 0, size;

        if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
                return -EIO;

        if (magic == TG3_EEPROM_MAGIC)
                size = NVRAM_TEST_SIZE;
        else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
                /* Only format-1 selfboot firmware images are testable. */
                if ((magic & 0xe00000) == 0x200000)
                        size = NVRAM_SELFBOOT_FORMAT1_SIZE;
                else
                        return 0;
        } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
                size = NVRAM_SELFBOOT_HW_SIZE;
        else
                return -EIO;

        buf = kmalloc(size, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;

        /* Pull the image into memory word by word. */
        err = -EIO;
        for (i = 0, j = 0; i < size; i += 4, j++) {
                u32 val;

                if ((err = tg3_nvram_read(tp, i, &val)) != 0)
                        break;
                buf[j] = cpu_to_le32(val);
        }
        if (i < size)
                goto out;

        /* Selfboot format */
        if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
            TG3_EEPROM_MAGIC_FW) {
                u8 *buf8 = (u8 *) buf, csum8 = 0;

                /* The byte-wise sum over the image must be zero. */
                for (i = 0; i < size; i++)
                        csum8 += buf8[i];

                if (csum8 == 0) {
                        err = 0;
                        goto out;
                }

                err = -EIO;
                goto out;
        }

        if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
            TG3_EEPROM_MAGIC_HW) {
                u8 data[NVRAM_SELFBOOT_DATA_SIZE];
                u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
                u8 *buf8 = (u8 *) buf;

                /* Separate the parity bits and the data bytes.  The
                 * bytes at offsets 0, 8 and 16 hold packed parity bits
                 * for the surrounding data bytes and are consumed here
                 * (note the extra i++ advances past them).
                 */
                for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
                        if ((i == 0) || (i == 8)) {
                                int l;
                                u8 msk;

                                for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        }
                        else if (i == 16) {
                                int l;
                                u8 msk;

                                for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;

                                for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        }
                        data[j++] = buf8[i];
                }

                /* Each data byte plus its parity bit must have an odd
                 * total population count (odd parity).
                 */
                err = -EIO;
                for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
                        u8 hw8 = hweight8(data[i]);

                        if ((hw8 & 0x1) && parity[i])
                                goto out;
                        else if (!(hw8 & 0x1) && !parity[i])
                                goto out;
                }
                err = 0;
                goto out;
        }

        /* Bootstrap checksum at offset 0x10 */
        csum = calc_crc((unsigned char *) buf, 0x10);
        if(csum != cpu_to_le32(buf[0x10/4]))
                goto out;

        /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
        csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
        if (csum != cpu_to_le32(buf[0xfc/4]))
                 goto out;

        err = 0;

out:
        kfree(buf);
        return err;
}
8759
8760 #define TG3_SERDES_TIMEOUT_SEC  2
8761 #define TG3_COPPER_TIMEOUT_SEC  6
8762
8763 static int tg3_test_link(struct tg3 *tp)
8764 {
8765         int i, max;
8766
8767         if (!netif_running(tp->dev))
8768                 return -ENODEV;
8769
8770         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8771                 max = TG3_SERDES_TIMEOUT_SEC;
8772         else
8773                 max = TG3_COPPER_TIMEOUT_SEC;
8774
8775         for (i = 0; i < max; i++) {
8776                 if (netif_carrier_ok(tp->dev))
8777                         return 0;
8778
8779                 if (msleep_interruptible(1000))
8780                         break;
8781         }
8782
8783         return -EIO;
8784 }
8785
8786 /* Only test the commonly used registers */
/* Register self-test: for every entry in reg_tbl[] applicable to this
 * chip, write all-zeros and then all-ones patterns and verify that the
 * read-only bits keep their saved value while the read/write bits take
 * exactly what was written.  The original register value is restored on
 * both the success and failure paths.  Returns 0 on success, -EIO on
 * the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;	/* register offset within BAR0 */
		u16 flags;	/* chip applicability, see TG3_FL_* below */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;	/* bits expected to be read-only */
		u32 write_mask;	/* bits expected to be read/write */
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Sentinel: offset 0xffff terminates the table. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		/* Restore the register before moving to the next entry. */
		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		printk(KERN_ERR PFX "Register test failed at offset %x\n",
		       offset);
	/* Restore the register even on the failure path. */
	tw32(offset, save_val);
	return -EIO;
}
9006
9007 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9008 {
9009         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9010         int i;
9011         u32 j;
9012
9013         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
9014                 for (j = 0; j < len; j += 4) {
9015                         u32 val;
9016
9017                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9018                         tg3_read_mem(tp, offset + j, &val);
9019                         if (val != test_pattern[i])
9020                                 return -EIO;
9021                 }
9022         }
9023         return 0;
9024 }
9025
9026 static int tg3_test_memory(struct tg3 *tp)
9027 {
9028         static struct mem_entry {
9029                 u32 offset;
9030                 u32 len;
9031         } mem_tbl_570x[] = {
9032                 { 0x00000000, 0x00b50},
9033                 { 0x00002000, 0x1c000},
9034                 { 0xffffffff, 0x00000}
9035         }, mem_tbl_5705[] = {
9036                 { 0x00000100, 0x0000c},
9037                 { 0x00000200, 0x00008},
9038                 { 0x00004000, 0x00800},
9039                 { 0x00006000, 0x01000},
9040                 { 0x00008000, 0x02000},
9041                 { 0x00010000, 0x0e000},
9042                 { 0xffffffff, 0x00000}
9043         }, mem_tbl_5755[] = {
9044                 { 0x00000200, 0x00008},
9045                 { 0x00004000, 0x00800},
9046                 { 0x00006000, 0x00800},
9047                 { 0x00008000, 0x02000},
9048                 { 0x00010000, 0x0c000},
9049                 { 0xffffffff, 0x00000}
9050         }, mem_tbl_5906[] = {
9051                 { 0x00000200, 0x00008},
9052                 { 0x00004000, 0x00400},
9053                 { 0x00006000, 0x00400},
9054                 { 0x00008000, 0x01000},
9055                 { 0x00010000, 0x01000},
9056                 { 0xffffffff, 0x00000}
9057         };
9058         struct mem_entry *mem_tbl;
9059         int err = 0;
9060         int i;
9061
9062         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9063                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9064                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9065                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9066                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9067                         mem_tbl = mem_tbl_5755;
9068                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9069                         mem_tbl = mem_tbl_5906;
9070                 else
9071                         mem_tbl = mem_tbl_5705;
9072         } else
9073                 mem_tbl = mem_tbl_570x;
9074
9075         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9076                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9077                     mem_tbl[i].len)) != 0)
9078                         break;
9079         }
9080
9081         return err;
9082 }
9083
9084 #define TG3_MAC_LOOPBACK        0
9085 #define TG3_PHY_LOOPBACK        1
9086
/* Run a single-packet loopback test in either MAC-internal or PHY
 * loopback mode.  A test frame is built, transmitted through the send
 * ring, and the receive return ring is polled for it; the payload is
 * then compared byte-for-byte.  Returns 0 on success, -EIO on any
 * mismatch/timeout, -EINVAL for an unknown mode, -ENOMEM if the skb
 * allocation fails.  Assumes the caller has already reset/configured
 * the hardware (see tg3_test_loopback()).
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		/* Internal MAC loopback: frames are turned around inside
		 * the MAC, no PHY involvement.
		 */
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			/* 5906: tweak a shadow register via the EPHY test
			 * page before enabling loopback.
			 */
			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			/* 5906 is a 10/100 part, so force 100 Mbps. */
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			/* Link polarity depends on the exact PHY model. */
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build the test frame: dest MAC, zero filler, then a counting
	 * byte pattern in the payload for later verification.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	/* Snapshot the RX producer so we can tell when our frame lands. */
	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	/* Kick the send ring; read back to flush the mailbox write. */
	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* TX must have completed and exactly our packet received. */
	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	/* Length in the descriptor includes the 4-byte FCS. */
	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Verify the counting payload pattern byte by byte. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
9254
9255 #define TG3_MAC_LOOPBACK_FAILED         1
9256 #define TG3_PHY_LOOPBACK_FAILED         2
9257 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
9258                                          TG3_PHY_LOOPBACK_FAILED)
9259
/* Loopback self-test driver: resets the hardware, then runs MAC-internal
 * loopback and (for copper parts) PHY loopback via tg3_run_loopback().
 * On CPMU-equipped chips the CPMU hardware mutex is taken and link-speed
 * power management disabled around the runs.  Returns a bitmask of
 * TG3_MAC_LOOPBACK_FAILED / TG3_PHY_LOOPBACK_FAILED, or 0 on success.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;

	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
		int i;
		u32 status;

		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER)
			return TG3_LOOPBACK_FAILED;

		/* Save current CPMU control so it can be restored later. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);

		/* Turn off power management based on link speed. */
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~CPMU_CTRL_LINK_SPEED_MODE);
	}

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;

	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
		/* Restore CPMU control before giving up the mutex. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	/* PHY loopback is meaningless on fiber (SERDES) boards. */
	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	return err;
}
9313
/* ethtool .self_test handler.  Fills data[] with one result slot per
 * test (0 = pass): [0] NVRAM, [1] link, [2] registers, [3] memory,
 * [4] loopback bitmask, [5] interrupt.  The offline tests halt the
 * chip under the full lock and restart it afterwards; a device in low
 * power is temporarily brought to D0 for the duration.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		/* Quiesce the interface before taking the full lock. */
		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Halt the chip and its on-board CPUs; hold the NVRAM
		 * lock across the CPU halts so firmware cannot interfere.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* data[4] carries the loopback failure bitmask directly. */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* Interrupt test must run without the full lock held. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Bring the device back up if it was running before. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
9386
9387 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9388 {
9389         struct mii_ioctl_data *data = if_mii(ifr);
9390         struct tg3 *tp = netdev_priv(dev);
9391         int err;
9392
9393         switch(cmd) {
9394         case SIOCGMIIPHY:
9395                 data->phy_id = PHY_ADDR;
9396
9397                 /* fallthru */
9398         case SIOCGMIIREG: {
9399                 u32 mii_regval;
9400
9401                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9402                         break;                  /* We have no PHY */
9403
9404                 if (tp->link_config.phy_is_low_power)
9405                         return -EAGAIN;
9406
9407                 spin_lock_bh(&tp->lock);
9408                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9409                 spin_unlock_bh(&tp->lock);
9410
9411                 data->val_out = mii_regval;
9412
9413                 return err;
9414         }
9415
9416         case SIOCSMIIREG:
9417                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9418                         break;                  /* We have no PHY */
9419
9420                 if (!capable(CAP_NET_ADMIN))
9421                         return -EPERM;
9422
9423                 if (tp->link_config.phy_is_low_power)
9424                         return -EAGAIN;
9425
9426                 spin_lock_bh(&tp->lock);
9427                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9428                 spin_unlock_bh(&tp->lock);
9429
9430                 return err;
9431
9432         default:
9433                 /* do nothing */
9434                 break;
9435         }
9436         return -EOPNOTSUPP;
9437 }
9438
9439 #if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: install the new VLAN group and refresh the
 * RX mode.  The interface is quiesced around the change, and the group
 * pointer is swapped under the full lock.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	if (netif_running(dev))
		tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
9459 #endif
9460
9461 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9462 {
9463         struct tg3 *tp = netdev_priv(dev);
9464
9465         memcpy(ec, &tp->coal, sizeof(*ec));
9466         return 0;
9467 }
9468
9469 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9470 {
9471         struct tg3 *tp = netdev_priv(dev);
9472         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9473         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9474
9475         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9476                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9477                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9478                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9479                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9480         }
9481
9482         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9483             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9484             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9485             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9486             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9487             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9488             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9489             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9490             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9491             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9492                 return -EINVAL;
9493
9494         /* No rx interrupts will be generated if both are zero */
9495         if ((ec->rx_coalesce_usecs == 0) &&
9496             (ec->rx_max_coalesced_frames == 0))
9497                 return -EINVAL;
9498
9499         /* No tx interrupts will be generated if both are zero */
9500         if ((ec->tx_coalesce_usecs == 0) &&
9501             (ec->tx_max_coalesced_frames == 0))
9502                 return -EINVAL;
9503
9504         /* Only copy relevant parameters, ignore all others. */
9505         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9506         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9507         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9508         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9509         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9510         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9511         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9512         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9513         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9514
9515         if (netif_running(dev)) {
9516                 tg3_full_lock(tp, 0);
9517                 __tg3_set_coalesce(tp, &tp->coal);
9518                 tg3_full_unlock(tp);
9519         }
9520         return 0;
9521 }
9522
/* ethtool operations implemented by this driver.  Each member points at
 * the tg3_* handler defined earlier in this file; unset members use the
 * ethtool core defaults.
 */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= tg3_set_tso,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
9555
9556 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9557 {
9558         u32 cursize, val, magic;
9559
9560         tp->nvram_size = EEPROM_CHIP_SIZE;
9561
9562         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9563                 return;
9564
9565         if ((magic != TG3_EEPROM_MAGIC) &&
9566             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9567             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9568                 return;
9569
9570         /*
9571          * Size the chip by reading offsets at increasing powers of two.
9572          * When we encounter our validation signature, we know the addressing
9573          * has wrapped around, and thus have our chip size.
9574          */
9575         cursize = 0x10;
9576
9577         while (cursize < tp->nvram_size) {
9578                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9579                         return;
9580
9581                 if (val == magic)
9582                         break;
9583
9584                 cursize <<= 1;
9585         }
9586
9587         tp->nvram_size = cursize;
9588 }
9589
9590 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9591 {
9592         u32 val;
9593
9594         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9595                 return;
9596
9597         /* Selfboot format */
9598         if (val != TG3_EEPROM_MAGIC) {
9599                 tg3_get_eeprom_size(tp);
9600                 return;
9601         }
9602
9603         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9604                 if (val != 0) {
9605                         tp->nvram_size = (val >> 16) * 1024;
9606                         return;
9607                 }
9608         }
9609         tp->nvram_size = 0x80000;
9610 }
9611
/* Probe NVRAM type via NVRAM_CFG1 and record the JEDEC vendor id, page
 * size and buffered/flash flags in *tp.  The vendor-field decode is only
 * valid for 5750 and 5780-class chips; all others get the Atmel
 * AT45DB0X1B defaults.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		/* No flash interface: clear the compat bypass bit so the
		 * EEPROM state machine handles accesses.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
			case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
				break;
			case FLASH_VENDOR_ATMEL_EEPROM:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ST:
				tp->nvram_jedecnum = JEDEC_ST;
				tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_SAIFUN:
				tp->nvram_jedecnum = JEDEC_SAIFUN;
				tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
				break;
			case FLASH_VENDOR_SST_SMALL:
			case FLASH_VENDOR_SST_LARGE:
				tp->nvram_jedecnum = JEDEC_SST;
				tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
				break;
		}
	}
	else {
		/* Not 5750/5780-class: fixed buffered Atmel flash defaults. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
9664
/* Probe NVRAM type on the 5752.  NVRAM_CFG1 bit 27 flags TPM-protected
 * NVRAM; the vendor field selects Atmel EEPROM, Atmel buffered flash or
 * ST flash.  For flash parts the page size is also encoded in CFG1;
 * EEPROM parts use the whole-chip "page" size instead.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		/* Flash part: decode the page size from CFG1. */
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
			case FLASH_5752PAGE_SIZE_256:
				tp->nvram_pagesize = 256;
				break;
			case FLASH_5752PAGE_SIZE_512:
				tp->nvram_pagesize = 512;
				break;
			case FLASH_5752PAGE_SIZE_1K:
				tp->nvram_pagesize = 1024;
				break;
			case FLASH_5752PAGE_SIZE_2K:
				tp->nvram_pagesize = 2048;
				break;
			case FLASH_5752PAGE_SIZE_4K:
				tp->nvram_pagesize = 4096;
				break;
			case FLASH_5752PAGE_SIZE_264:
				tp->nvram_pagesize = 264;
				break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
9725
/* Probe NVRAM type on the 5755.  Unlike the 5752, the device size is
 * derived here from the vendor code, and the usable size shrinks when
 * the TPM protection bit (CFG1 bit 27) is set.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
		case FLASH_5755VENDOR_ATMEL_FLASH_5:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			/* Size depends on exact part and protection state. */
			if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
			    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
				tp->nvram_size = (protect ? 0x3e200 : 0x80000);
			else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
				tp->nvram_size = (protect ? 0x1f200 : 0x40000);
			else
				tp->nvram_size = (protect ? 0x1f200 : 0x20000);
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
				tp->nvram_size = (protect ? 0x10000 : 0x20000);
			else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
				tp->nvram_size = (protect ? 0x10000 : 0x40000);
			else
				tp->nvram_size = (protect ? 0x20000 : 0x80000);
			break;
	}
}
9772
/* Probe NVRAM type on the 5787 (also used for the 5784): decode vendor
 * from NVRAM_CFG1 and record JEDEC id, buffered/flash flags and page
 * size.  EEPROM parts additionally get compat bypass cleared.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			/* EEPROM: route accesses through the EEPROM state
			 * machine, not the compat bypass path.
			 */
			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
9810
/* Probe NVRAM type on the 5761.  When the TPM protection bit is set the
 * usable size comes from the NVRAM_ADDR_LOCKOUT register; otherwise it
 * is derived from the vendor code.  Atmel parts on this chip do not use
 * the AT45DB0X1B page-address translation (TG3_FLG3_NO_NVRAM_ADDR_TRANS).
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
			tp->nvram_pagesize = 256;
			break;
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}

	if (protect) {
		/* Protected: hardware reports the accessible size. */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		/* Unprotected: size follows from the exact part. */
		switch (nvcfg1) {
			case FLASH_5761VENDOR_ATMEL_ADB161D:
			case FLASH_5761VENDOR_ATMEL_MDB161D:
			case FLASH_5761VENDOR_ST_A_M45PE16:
			case FLASH_5761VENDOR_ST_M_M45PE16:
				tp->nvram_size = 0x100000;
				break;
			case FLASH_5761VENDOR_ATMEL_ADB081D:
			case FLASH_5761VENDOR_ATMEL_MDB081D:
			case FLASH_5761VENDOR_ST_A_M45PE80:
			case FLASH_5761VENDOR_ST_M_M45PE80:
				tp->nvram_size = 0x80000;
				break;
			case FLASH_5761VENDOR_ATMEL_ADB041D:
			case FLASH_5761VENDOR_ATMEL_MDB041D:
			case FLASH_5761VENDOR_ST_A_M45PE40:
			case FLASH_5761VENDOR_ST_M_M45PE40:
				tp->nvram_size = 0x40000;
				break;
			case FLASH_5761VENDOR_ATMEL_ADB021D:
			case FLASH_5761VENDOR_ATMEL_MDB021D:
			case FLASH_5761VENDOR_ST_A_M45PE20:
			case FLASH_5761VENDOR_ST_M_M45PE20:
				tp->nvram_size = 0x20000;
				break;
		}
	}
}
9885
/* The 5906 always uses a buffered Atmel EEPROM; no register probing
 * is needed.
 */
static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
9892
9893 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9894 static void __devinit tg3_nvram_init(struct tg3 *tp)
9895 {
9896         tw32_f(GRC_EEPROM_ADDR,
9897              (EEPROM_ADDR_FSM_RESET |
9898               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9899                EEPROM_ADDR_CLKPERD_SHIFT)));
9900
9901         msleep(1);
9902
9903         /* Enable seeprom accesses. */
9904         tw32_f(GRC_LOCAL_CTRL,
9905              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9906         udelay(100);
9907
9908         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9909             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9910                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9911
9912                 if (tg3_nvram_lock(tp)) {
9913                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9914                                "tg3_nvram_init failed.\n", tp->dev->name);
9915                         return;
9916                 }
9917                 tg3_enable_nvram_access(tp);
9918
9919                 tp->nvram_size = 0;
9920
9921                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9922                         tg3_get_5752_nvram_info(tp);
9923                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9924                         tg3_get_5755_nvram_info(tp);
9925                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9926                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
9927                         tg3_get_5787_nvram_info(tp);
9928                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9929                         tg3_get_5761_nvram_info(tp);
9930                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9931                         tg3_get_5906_nvram_info(tp);
9932                 else
9933                         tg3_get_nvram_info(tp);
9934
9935                 if (tp->nvram_size == 0)
9936                         tg3_get_nvram_size(tp);
9937
9938                 tg3_disable_nvram_access(tp);
9939                 tg3_nvram_unlock(tp);
9940
9941         } else {
9942                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9943
9944                 tg3_get_eeprom_size(tp);
9945         }
9946 }
9947
/* Read one 32-bit word from the serial EEPROM via the GRC_EEPROM_ADDR
 * state machine (used when the chip has no NVRAM interface).
 * @offset must be dword aligned and within EEPROM_ADDR_ADDR_MASK.
 * Returns 0 on success, -EINVAL on bad offset, -EBUSY on timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Preserve the register's other fields; clear address, device id
	 * and direction before programming the read.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, up to ~1 second. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
9981
9982 #define NVRAM_CMD_TIMEOUT 10000
9983
9984 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9985 {
9986         int i;
9987
9988         tw32(NVRAM_CMD, nvram_cmd);
9989         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9990                 udelay(10);
9991                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9992                         udelay(10);
9993                         break;
9994                 }
9995         }
9996         if (i == NVRAM_CMD_TIMEOUT) {
9997                 return -EBUSY;
9998         }
9999         return 0;
10000 }
10001
10002 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10003 {
10004         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10005             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10006             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10007            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10008             (tp->nvram_jedecnum == JEDEC_ATMEL))
10009
10010                 addr = ((addr / tp->nvram_pagesize) <<
10011                         ATMEL_AT45DB0X1B_PAGE_POS) +
10012                        (addr % tp->nvram_pagesize);
10013
10014         return addr;
10015 }
10016
10017 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10018 {
10019         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10020             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10021             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10022            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10023             (tp->nvram_jedecnum == JEDEC_ATMEL))
10024
10025                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10026                         tp->nvram_pagesize) +
10027                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10028
10029         return addr;
10030 }
10031
/* Read one 32-bit word from NVRAM at @offset.  Falls back to the EEPROM
 * state machine on chips without an NVRAM interface.  Takes and releases
 * the NVRAM hardware lock around the access.  Returns 0 or a negative
 * errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate to the device's physical addressing scheme. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	/* Data register holds the word byte-swapped relative to CPU order. */
	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
10063
10064 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10065 {
10066         int err;
10067         u32 tmp;
10068
10069         err = tg3_nvram_read(tp, offset, &tmp);
10070         *val = swab32(tmp);
10071         return err;
10072 }
10073
/* Write @len bytes from @buf to the serial EEPROM starting at @offset,
 * one 32-bit word at a time, via the GRC_EEPROM_ADDR state machine.
 * Caller guarantees dword-aligned offset and length.  Returns 0 on
 * success or -EBUSY if a word write times out.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr, data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

		/* Ack any stale COMPLETE status before starting. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion, up to ~1 second per word. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
10115
10116 /* offset and length are dword aligned */
/* offset and length are dword aligned */
/* Write to unbuffered flash with a read-modify-erase-write cycle per
 * page: read the whole target page into a scratch buffer, merge in the
 * caller's data, issue write-enable, erase the page, then program it
 * word by word.  Returns 0 or a negative errno.
 *
 * NOTE(review): 'buf' is never advanced inside the while loop, so if
 * 'len' spans more than one flash page each page would be filled from
 * the start of 'buf'.  Verify whether callers ever pass multi-page
 * lengths before relying on this path for large writes.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Scratch buffer holding one full flash page. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		/* Read the current contents of the target page. */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read(tp, phy_addr + j,
						(u32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Merge caller data over the saved page contents. */
		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Program the page one word at a time, marking the first
		 * and last words of the burst.
		 */
		for (j = 0; j < pagesize; j += 4) {
			u32 data;

			data = *((u32 *) (tmp + j));
			tw32(NVRAM_WRDATA, cpu_to_be32(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Always finish with a write-disable command. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
10211
10212 /* offset and length are dword aligned */
10213 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
10214                 u8 *buf)
10215 {
10216         int i, ret = 0;
10217
10218         for (i = 0; i < len; i += 4, offset += 4) {
10219                 u32 data, page_off, phy_addr, nvram_cmd;
10220
10221                 memcpy(&data, buf + i, 4);
10222                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
10223
10224                 page_off = offset % tp->nvram_pagesize;
10225
10226                 phy_addr = tg3_nvram_phys_addr(tp, offset);
10227
10228                 tw32(NVRAM_ADDR, phy_addr);
10229
10230                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
10231
10232                 if ((page_off == 0) || (i == 0))
10233                         nvram_cmd |= NVRAM_CMD_FIRST;
10234                 if (page_off == (tp->nvram_pagesize - 4))
10235                         nvram_cmd |= NVRAM_CMD_LAST;
10236
10237                 if (i == (len - 4))
10238                         nvram_cmd |= NVRAM_CMD_LAST;
10239
10240                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
10241                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
10242                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
10243                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
10244                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
10245                     (tp->nvram_jedecnum == JEDEC_ST) &&
10246                     (nvram_cmd & NVRAM_CMD_FIRST)) {
10247
10248                         if ((ret = tg3_nvram_exec_cmd(tp,
10249                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
10250                                 NVRAM_CMD_DONE)))
10251
10252                                 break;
10253                 }
10254                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10255                         /* We always do complete word writes to eeprom. */
10256                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
10257                 }
10258
10259                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10260                         break;
10261         }
10262         return ret;
10263 }
10264
10265 /* offset and length are dword aligned */
10266 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10267 {
10268         int ret;
10269
10270         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10271                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10272                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
10273                 udelay(40);
10274         }
10275
10276         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10277                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10278         }
10279         else {
10280                 u32 grc_mode;
10281
10282                 ret = tg3_nvram_lock(tp);
10283                 if (ret)
10284                         return ret;
10285
10286                 tg3_enable_nvram_access(tp);
10287                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10288                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
10289                         tw32(NVRAM_WRITE1, 0x406);
10290
10291                 grc_mode = tr32(GRC_MODE);
10292                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10293
10294                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10295                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10296
10297                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
10298                                 buf);
10299                 }
10300                 else {
10301                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10302                                 buf);
10303                 }
10304
10305                 grc_mode = tr32(GRC_MODE);
10306                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10307
10308                 tg3_disable_nvram_access(tp);
10309                 tg3_nvram_unlock(tp);
10310         }
10311
10312         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10313                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10314                 udelay(40);
10315         }
10316
10317         return ret;
10318 }
10319
/* One entry of the subsystem-ID -> PHY ID fallback table below. */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

/* Hard-coded PHY IDs, keyed by PCI subsystem vendor/device ID, used
 * by lookup_by_subsys() when no PHY ID can be obtained from the PHY
 * hardware or the eeprom.  A phy_id of 0 is treated as a SerDes
 * (fiber) board by tg3_phy_probe().
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
10362
10363 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10364 {
10365         int i;
10366
10367         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10368                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10369                      tp->pdev->subsystem_vendor) &&
10370                     (subsys_id_to_phy_id[i].subsys_devid ==
10371                      tp->pdev->subsystem_device))
10372                         return &subsys_id_to_phy_id[i];
10373         }
10374         return NULL;
10375 }
10376
/* Read the hardware configuration left in NIC SRAM by bootcode
 * (NIC_SRAM_DATA_*) and initialize tp->phy_id, tp->led_ctrl and the
 * WOL/ASF/APE/SerDes related tg3_flags accordingly.  Called at probe
 * time, before the PHY is probed.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;

	/* 5906 config is read from the VCPU shadow register rather than
	 * NIC SRAM; handle it separately and return early.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
		return;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* DATA_CFG_2 is only read on chips/bootcode versions known
		 * to provide it (not 5700/5701/5703, and 0 < ver < 0x100);
		 * cfg2 stays 0 otherwise.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Repack the SRAM-format PHY ID into the driver's
		 * PHY_ID layout (same layout built in tg3_phy_probe()).
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		/* 5750+ chips carry extra (Shasta) LED mode bits in cfg2. */
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		};

		/* Dell 5700/5701 boards always use PHY_2 LED mode. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
			/* Specific Arima boards (0x205a/0x2063) are
			 * exempted from write protect despite the WP bit.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
		} else {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
			tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
		/* SerDes boards without fiber WOL support lose WOL_CAP. */
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;

		if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
		    nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;

		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		}
	}
}
10564
/* Determine this device's PHY ID and set the initial link advertising
 * configuration.  The ID is read from the PHY hardware when possible,
 * otherwise taken from the eeprom value cached by
 * tg3_get_eeprom_hw_cfg(), and failing that from the hard-coded
 * subsystem-ID table.  Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		/* BCM8002 is handled as a SerDes device. */
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* A table phy_id of 0 marks a SerDes board. */
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* For copper PHYs not managed by ASF/APE firmware: if link is not
	 * already up, reset the PHY and program default autonegotiation
	 * advertisement.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR is read twice -- presumably because the link bit is
		 * latched, so only the second read reflects current state;
		 * confirm against the PHY datasheet.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* Early 5701 revs must be forced to master mode. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		/* NOTE(review): the advertisement registers are written
		 * again unconditionally here even when the branch above
		 * already wrote them -- looks redundant; confirm before
		 * cleaning up.
		 */
		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): when the call above succeeded (err == 0 here),
	 * this runs the 5401 DSP init a second time -- apparently
	 * redundant; verify against hardware before removing.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	/* SerDes links advertise gigabit/fibre modes only. */
	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
10692
10693 static void __devinit tg3_read_partno(struct tg3 *tp)
10694 {
10695         unsigned char vpd_data[256];
10696         unsigned int i;
10697         u32 magic;
10698
10699         if (tg3_nvram_read_swab(tp, 0x0, &magic))
10700                 goto out_not_found;
10701
10702         if (magic == TG3_EEPROM_MAGIC) {
10703                 for (i = 0; i < 256; i += 4) {
10704                         u32 tmp;
10705
10706                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
10707                                 goto out_not_found;
10708
10709                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
10710                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
10711                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
10712                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
10713                 }
10714         } else {
10715                 int vpd_cap;
10716
10717                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10718                 for (i = 0; i < 256; i += 4) {
10719                         u32 tmp, j = 0;
10720                         u16 tmp16;
10721
10722                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10723                                               i);
10724                         while (j++ < 100) {
10725                                 pci_read_config_word(tp->pdev, vpd_cap +
10726                                                      PCI_VPD_ADDR, &tmp16);
10727                                 if (tmp16 & 0x8000)
10728                                         break;
10729                                 msleep(1);
10730                         }
10731                         if (!(tmp16 & 0x8000))
10732                                 goto out_not_found;
10733
10734                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10735                                               &tmp);
10736                         tmp = cpu_to_le32(tmp);
10737                         memcpy(&vpd_data[i], &tmp, 4);
10738                 }
10739         }
10740
10741         /* Now parse and find the part number. */
10742         for (i = 0; i < 254; ) {
10743                 unsigned char val = vpd_data[i];
10744                 unsigned int block_end;
10745
10746                 if (val == 0x82 || val == 0x91) {
10747                         i = (i + 3 +
10748                              (vpd_data[i + 1] +
10749                               (vpd_data[i + 2] << 8)));
10750                         continue;
10751                 }
10752
10753                 if (val != 0x90)
10754                         goto out_not_found;
10755
10756                 block_end = (i + 3 +
10757                              (vpd_data[i + 1] +
10758                               (vpd_data[i + 2] << 8)));
10759                 i += 3;
10760
10761                 if (block_end > 256)
10762                         goto out_not_found;
10763
10764                 while (i < (block_end - 2)) {
10765                         if (vpd_data[i + 0] == 'P' &&
10766                             vpd_data[i + 1] == 'N') {
10767                                 int partno_len = vpd_data[i + 2];
10768
10769                                 i += 3;
10770                                 if (partno_len > 24 || (partno_len + i) > 256)
10771                                         goto out_not_found;
10772
10773                                 memcpy(tp->board_part_number,
10774                                        &vpd_data[i], partno_len);
10775
10776                                 /* Success. */
10777                                 return;
10778                         }
10779                         i += 3 + vpd_data[i + 2];
10780                 }
10781
10782                 /* Part number not found. */
10783                 goto out_not_found;
10784         }
10785
10786 out_not_found:
10787         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10788                 strcpy(tp->board_part_number, "BCM95906");
10789         else
10790                 strcpy(tp->board_part_number, "none");
10791 }
10792
/* Extract the bootcode firmware version string from NVRAM into
 * tp->fw_ver.  Returns silently (leaving tp->fw_ver untouched) on any
 * read failure or if the image is not in the expected format.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;

	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Words 0xc and 0x4 -- presumably a pointer to the bootcode
	 * section and the image start address; inferred from usage below,
	 * confirm against the NVRAM layout docs.
	 */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);
	if (tg3_nvram_read_swab(tp, offset, &val))
		return;

	/* Only sections whose header word has this signature carry a
	 * version string.
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		u32 ver_offset, addr;
		int i;

		if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
		    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
			return;

		if (val != 0)
			return;

		/* Copy the 16-byte version string in little-endian byte
		 * order, 4 bytes at a time.
		 */
		addr = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			if (tg3_nvram_read(tp, addr + i, &val))
				return;

			val = cpu_to_le32(val);
			memcpy(tp->fw_ver + i, &val, 4);
		}
	}
}
10832
10833 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
10834
10835 static int __devinit tg3_get_invariants(struct tg3 *tp)
10836 {
10837         static struct pci_device_id write_reorder_chipsets[] = {
10838                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10839                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10840                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10841                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10842                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10843                              PCI_DEVICE_ID_VIA_8385_0) },
10844                 { },
10845         };
10846         u32 misc_ctrl_reg;
10847         u32 cacheline_sz_reg;
10848         u32 pci_state_reg, grc_misc_cfg;
10849         u32 val;
10850         u16 pci_cmd;
10851         int err, pcie_cap;
10852
10853         /* Force memory write invalidate off.  If we leave it on,
10854          * then on 5700_BX chips we have to enable a workaround.
10855          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10856          * to match the cacheline size.  The Broadcom driver have this
10857          * workaround but turns MWI off all the times so never uses
10858          * it.  This seems to suggest that the workaround is insufficient.
10859          */
10860         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10861         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10862         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10863
10864         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10865          * has the register indirect write enable bit set before
10866          * we try to access any of the MMIO registers.  It is also
10867          * critical that the PCI-X hw workaround situation is decided
10868          * before that as well.
10869          */
10870         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10871                               &misc_ctrl_reg);
10872
10873         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10874                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10875         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
10876                 u32 prod_id_asic_rev;
10877
10878                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
10879                                       &prod_id_asic_rev);
10880                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
10881         }
10882
10883         /* Wrong chip ID in 5752 A0. This code can be removed later
10884          * as A0 is not in production.
10885          */
10886         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10887                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10888
10889         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10890          * we need to disable memory and use config. cycles
10891          * only to access all registers. The 5702/03 chips
10892          * can mistakenly decode the special cycles from the
10893          * ICH chipsets as memory write cycles, causing corruption
10894          * of register and memory space. Only certain ICH bridges
10895          * will drive special cycles with non-zero data during the
10896          * address phase which can fall within the 5703's address
10897          * range. This is not an ICH bug as the PCI spec allows
10898          * non-zero address during special cycles. However, only
10899          * these ICH bridges are known to drive non-zero addresses
10900          * during special cycles.
10901          *
10902          * Since special cycles do not cross PCI bridges, we only
10903          * enable this workaround if the 5703 is on the secondary
10904          * bus of these ICH bridges.
10905          */
10906         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10907             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10908                 static struct tg3_dev_id {
10909                         u32     vendor;
10910                         u32     device;
10911                         u32     rev;
10912                 } ich_chipsets[] = {
10913                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10914                           PCI_ANY_ID },
10915                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10916                           PCI_ANY_ID },
10917                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10918                           0xa },
10919                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10920                           PCI_ANY_ID },
10921                         { },
10922                 };
10923                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10924                 struct pci_dev *bridge = NULL;
10925
10926                 while (pci_id->vendor != 0) {
10927                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10928                                                 bridge);
10929                         if (!bridge) {
10930                                 pci_id++;
10931                                 continue;
10932                         }
10933                         if (pci_id->rev != PCI_ANY_ID) {
10934                                 if (bridge->revision > pci_id->rev)
10935                                         continue;
10936                         }
10937                         if (bridge->subordinate &&
10938                             (bridge->subordinate->number ==
10939                              tp->pdev->bus->number)) {
10940
10941                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10942                                 pci_dev_put(bridge);
10943                                 break;
10944                         }
10945                 }
10946         }
10947
10948         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10949          * DMA addresses > 40-bit. This bridge may have other additional
10950          * 57xx devices behind it in some 4-port NIC designs for example.
10951          * Any tg3 device found behind the bridge will also need the 40-bit
10952          * DMA workaround.
10953          */
10954         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10955             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10956                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10957                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10958                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10959         }
10960         else {
10961                 struct pci_dev *bridge = NULL;
10962
10963                 do {
10964                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10965                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10966                                                 bridge);
10967                         if (bridge && bridge->subordinate &&
10968                             (bridge->subordinate->number <=
10969                              tp->pdev->bus->number) &&
10970                             (bridge->subordinate->subordinate >=
10971                              tp->pdev->bus->number)) {
10972                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10973                                 pci_dev_put(bridge);
10974                                 break;
10975                         }
10976                 } while (bridge);
10977         }
10978
10979         /* Initialize misc host control in PCI block. */
10980         tp->misc_host_ctrl |= (misc_ctrl_reg &
10981                                MISC_HOST_CTRL_CHIPREV);
10982         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10983                                tp->misc_host_ctrl);
10984
10985         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10986                               &cacheline_sz_reg);
10987
10988         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10989         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10990         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10991         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10992
10993         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10994             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
10995                 tp->pdev_peer = tg3_find_peer(tp);
10996
10997         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10998             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10999             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11000             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11001             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11002             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11003             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11004             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11005                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11006
11007         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11008             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11009                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11010
11011         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11012                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11013                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11014                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11015                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11016                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11017                      tp->pdev_peer == tp->pdev))
11018                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11019
11020                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11021                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11022                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11023                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11024                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11025                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11026                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11027                 } else {
11028                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11029                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11030                                 ASIC_REV_5750 &&
11031                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11032                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11033                 }
11034         }
11035
11036         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11037             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
11038             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11039             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
11040             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
11041             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
11042             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
11043             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11044                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11045
11046         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11047         if (pcie_cap != 0) {
11048                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11049                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11050                         u16 lnkctl;
11051
11052                         pci_read_config_word(tp->pdev,
11053                                              pcie_cap + PCI_EXP_LNKCTL,
11054                                              &lnkctl);
11055                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11056                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11057                 }
11058         }
11059
11060         /* If we have an AMD 762 or VIA K8T800 chipset, write
11061          * reordering to the mailbox registers done by the host
11062          * controller can cause major troubles.  We read back from
11063          * every mailbox register write to force the writes to be
11064          * posted to the chip in order.
11065          */
11066         if (pci_dev_present(write_reorder_chipsets) &&
11067             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11068                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11069
11070         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11071             tp->pci_lat_timer < 64) {
11072                 tp->pci_lat_timer = 64;
11073
11074                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
11075                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
11076                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
11077                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
11078
11079                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11080                                        cacheline_sz_reg);
11081         }
11082
11083         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11084             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11085                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11086                 if (!tp->pcix_cap) {
11087                         printk(KERN_ERR PFX "Cannot find PCI-X "
11088                                             "capability, aborting.\n");
11089                         return -EIO;
11090                 }
11091         }
11092
11093         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11094                               &pci_state_reg);
11095
11096         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
11097                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11098
11099                 /* If this is a 5700 BX chipset, and we are in PCI-X
11100                  * mode, enable register write workaround.
11101                  *
11102                  * The workaround is to use indirect register accesses
11103                  * for all chip writes not to mailbox registers.
11104                  */
11105                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11106                         u32 pm_reg;
11107
11108                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11109
11110                         /* The chip can have it's power management PCI config
11111                          * space registers clobbered due to this bug.
11112                          * So explicitly force the chip into D0 here.
11113                          */
11114                         pci_read_config_dword(tp->pdev,
11115                                               tp->pm_cap + PCI_PM_CTRL,
11116                                               &pm_reg);
11117                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11118                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11119                         pci_write_config_dword(tp->pdev,
11120                                                tp->pm_cap + PCI_PM_CTRL,
11121                                                pm_reg);
11122
11123                         /* Also, force SERR#/PERR# in PCI command. */
11124                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11125                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11126                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11127                 }
11128         }
11129
11130         /* 5700 BX chips need to have their TX producer index mailboxes
11131          * written twice to workaround a bug.
11132          */
11133         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11134                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11135
11136         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11137                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11138         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11139                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11140
11141         /* Chip-specific fixup from Broadcom driver */
11142         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11143             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11144                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11145                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11146         }
11147
11148         /* Default fast path register access methods */
11149         tp->read32 = tg3_read32;
11150         tp->write32 = tg3_write32;
11151         tp->read32_mbox = tg3_read32;
11152         tp->write32_mbox = tg3_write32;
11153         tp->write32_tx_mbox = tg3_write32;
11154         tp->write32_rx_mbox = tg3_write32;
11155
11156         /* Various workaround register access methods */
11157         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11158                 tp->write32 = tg3_write_indirect_reg32;
11159         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11160                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11161                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11162                 /*
11163                  * Back to back register writes can cause problems on these
11164                  * chips, the workaround is to read back all reg writes
11165                  * except those to mailbox regs.
11166                  *
11167                  * See tg3_write_indirect_reg32().
11168                  */
11169                 tp->write32 = tg3_write_flush_reg32;
11170         }
11171
11172
11173         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11174             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11175                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11176                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11177                         tp->write32_rx_mbox = tg3_write_flush_reg32;
11178         }
11179
11180         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11181                 tp->read32 = tg3_read_indirect_reg32;
11182                 tp->write32 = tg3_write_indirect_reg32;
11183                 tp->read32_mbox = tg3_read_indirect_mbox;
11184                 tp->write32_mbox = tg3_write_indirect_mbox;
11185                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11186                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11187
11188                 iounmap(tp->regs);
11189                 tp->regs = NULL;
11190
11191                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11192                 pci_cmd &= ~PCI_COMMAND_MEMORY;
11193                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11194         }
11195         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11196                 tp->read32_mbox = tg3_read32_mbox_5906;
11197                 tp->write32_mbox = tg3_write32_mbox_5906;
11198                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11199                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11200         }
11201
11202         if (tp->write32 == tg3_write_indirect_reg32 ||
11203             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11204              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11205               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11206                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11207
11208         /* Get eeprom hw config before calling tg3_set_power_state().
11209          * In particular, the TG3_FLG2_IS_NIC flag must be
11210          * determined before calling tg3_set_power_state() so that
11211          * we know whether or not to switch out of Vaux power.
11212          * When the flag is set, it means that GPIO1 is used for eeprom
11213          * write protect and also implies that it is a LOM where GPIOs
11214          * are not used to switch power.
11215          */
11216         tg3_get_eeprom_hw_cfg(tp);
11217
11218         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11219                 /* Allow reads and writes to the
11220                  * APE register and memory space.
11221                  */
11222                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11223                                  PCISTATE_ALLOW_APE_SHMEM_WR;
11224                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11225                                        pci_state_reg);
11226         }
11227
11228         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11229             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11230                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11231
11232         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11233          * GPIO1 driven high will bring 5700's external PHY out of reset.
11234          * It is also used as eeprom write protect on LOMs.
11235          */
11236         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11237         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11238             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11239                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11240                                        GRC_LCLCTRL_GPIO_OUTPUT1);
11241         /* Unused GPIO3 must be driven as output on 5752 because there
11242          * are no pull-up resistors on unused GPIO pins.
11243          */
11244         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11245                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
11246
11247         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11248                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11249
11250         /* Force the chip into D0. */
11251         err = tg3_set_power_state(tp, PCI_D0);
11252         if (err) {
11253                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11254                        pci_name(tp->pdev));
11255                 return err;
11256         }
11257
11258         /* 5700 B0 chips do not support checksumming correctly due
11259          * to hardware bugs.
11260          */
11261         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11262                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11263
11264         /* Derive initial jumbo mode from MTU assigned in
11265          * ether_setup() via the alloc_etherdev() call
11266          */
11267         if (tp->dev->mtu > ETH_DATA_LEN &&
11268             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11269                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
11270
11271         /* Determine WakeOnLan speed to use. */
11272         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11273             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11274             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11275             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11276                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11277         } else {
11278                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11279         }
11280
11281         /* A few boards don't want Ethernet@WireSpeed phy feature */
11282         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11283             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11284              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
11285              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
11286             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
11287             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
11288                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11289
11290         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11291             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11292                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11293         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11294                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11295
11296         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11297                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11298                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11299                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11300                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11301                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11302                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11303                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11304                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11305                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11306                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11307                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11308         }
11309
11310         tp->coalesce_mode = 0;
11311         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11312             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11313                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11314
11315         /* Initialize MAC MI mode, polling disabled. */
11316         tw32_f(MAC_MI_MODE, tp->mi_mode);
11317         udelay(80);
11318
11319         /* Initialize data/descriptor byte/word swapping. */
11320         val = tr32(GRC_MODE);
11321         val &= GRC_MODE_HOST_STACKUP;
11322         tw32(GRC_MODE, val | tp->grc_mode);
11323
11324         tg3_switch_clocks(tp);
11325
11326         /* Clear this out for sanity. */
11327         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11328
11329         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11330                               &pci_state_reg);
11331         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11332             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11333                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11334
11335                 if (chiprevid == CHIPREV_ID_5701_A0 ||
11336                     chiprevid == CHIPREV_ID_5701_B0 ||
11337                     chiprevid == CHIPREV_ID_5701_B2 ||
11338                     chiprevid == CHIPREV_ID_5701_B5) {
11339                         void __iomem *sram_base;
11340
11341                         /* Write some dummy words into the SRAM status block
11342                          * area, see if it reads back correctly.  If the return
11343                          * value is bad, force enable the PCIX workaround.
11344                          */
11345                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11346
11347                         writel(0x00000000, sram_base);
11348                         writel(0x00000000, sram_base + 4);
11349                         writel(0xffffffff, sram_base + 4);
11350                         if (readl(sram_base) != 0x00000000)
11351                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11352                 }
11353         }
11354
11355         udelay(50);
11356         tg3_nvram_init(tp);
11357
11358         grc_misc_cfg = tr32(GRC_MISC_CFG);
11359         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11360
11361         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11362             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11363              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11364                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11365
11366         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11367             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11368                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11369         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11370                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11371                                       HOSTCC_MODE_CLRTICK_TXBD);
11372
11373                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11374                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11375                                        tp->misc_host_ctrl);
11376         }
11377
11378         /* these are limited to 10/100 only */
11379         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11380              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11381             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11382              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11383              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11384               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11385               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11386             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11387              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
11388               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11389               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
11390             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11391                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11392
11393         err = tg3_phy_probe(tp);
11394         if (err) {
11395                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11396                        pci_name(tp->pdev), err);
11397                 /* ... but do not return immediately ... */
11398         }
11399
11400         tg3_read_partno(tp);
11401         tg3_read_fw_ver(tp);
11402
11403         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11404                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11405         } else {
11406                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11407                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11408                 else
11409                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11410         }
11411
11412         /* 5700 {AX,BX} chips have a broken status block link
11413          * change bit implementation, so we must use the
11414          * status register in those cases.
11415          */
11416         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11417                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11418         else
11419                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11420
11421         /* The led_ctrl is set during tg3_phy_probe, here we might
11422          * have to force the link status polling mechanism based
11423          * upon subsystem IDs.
11424          */
11425         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
11426             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11427             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11428                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11429                                   TG3_FLAG_USE_LINKCHG_REG);
11430         }
11431
11432         /* For all SERDES we poll the MAC status register. */
11433         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11434                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11435         else
11436                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11437
11438         /* All chips before 5787 can get confused if TX buffers
11439          * straddle the 4GB address boundary in some cases.
11440          */
11441         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11442             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11443             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11444             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11445             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11446                 tp->dev->hard_start_xmit = tg3_start_xmit;
11447         else
11448                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
11449
11450         tp->rx_offset = 2;
11451         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11452             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11453                 tp->rx_offset = 0;
11454
11455         tp->rx_std_max_post = TG3_RX_RING_SIZE;
11456
11457         /* Increment the rx prod index on the rx std ring by at most
11458          * 8 for these chips to workaround hw errata.
11459          */
11460         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11461             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11462             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11463                 tp->rx_std_max_post = 8;
11464
11465         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11466                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11467                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
11468
11469         return err;
11470 }
11471
11472 #ifdef CONFIG_SPARC
11473 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11474 {
11475         struct net_device *dev = tp->dev;
11476         struct pci_dev *pdev = tp->pdev;
11477         struct device_node *dp = pci_device_to_OF_node(pdev);
11478         const unsigned char *addr;
11479         int len;
11480
11481         addr = of_get_property(dp, "local-mac-address", &len);
11482         if (addr && len == 6) {
11483                 memcpy(dev->dev_addr, addr, 6);
11484                 memcpy(dev->perm_addr, dev->dev_addr, 6);
11485                 return 0;
11486         }
11487         return -ENODEV;
11488 }
11489
11490 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11491 {
11492         struct net_device *dev = tp->dev;
11493
11494         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
11495         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
11496         return 0;
11497 }
11498 #endif
11499
11500 static int __devinit tg3_get_device_address(struct tg3 *tp)
11501 {
11502         struct net_device *dev = tp->dev;
11503         u32 hi, lo, mac_offset;
11504         int addr_ok = 0;
11505
11506 #ifdef CONFIG_SPARC
11507         if (!tg3_get_macaddr_sparc(tp))
11508                 return 0;
11509 #endif
11510
11511         mac_offset = 0x7c;
11512         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11513             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11514                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11515                         mac_offset = 0xcc;
11516                 if (tg3_nvram_lock(tp))
11517                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11518                 else
11519                         tg3_nvram_unlock(tp);
11520         }
11521         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11522                 mac_offset = 0x10;
11523
11524         /* First try to get it from MAC address mailbox. */
11525         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
11526         if ((hi >> 16) == 0x484b) {
11527                 dev->dev_addr[0] = (hi >>  8) & 0xff;
11528                 dev->dev_addr[1] = (hi >>  0) & 0xff;
11529
11530                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11531                 dev->dev_addr[2] = (lo >> 24) & 0xff;
11532                 dev->dev_addr[3] = (lo >> 16) & 0xff;
11533                 dev->dev_addr[4] = (lo >>  8) & 0xff;
11534                 dev->dev_addr[5] = (lo >>  0) & 0xff;
11535
11536                 /* Some old bootcode may report a 0 MAC address in SRAM */
11537                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11538         }
11539         if (!addr_ok) {
11540                 /* Next, try NVRAM. */
11541                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
11542                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11543                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
11544                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
11545                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
11546                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
11547                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
11548                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
11549                 }
11550                 /* Finally just fetch it out of the MAC control regs. */
11551                 else {
11552                         hi = tr32(MAC_ADDR_0_HIGH);
11553                         lo = tr32(MAC_ADDR_0_LOW);
11554
11555                         dev->dev_addr[5] = lo & 0xff;
11556                         dev->dev_addr[4] = (lo >> 8) & 0xff;
11557                         dev->dev_addr[3] = (lo >> 16) & 0xff;
11558                         dev->dev_addr[2] = (lo >> 24) & 0xff;
11559                         dev->dev_addr[1] = hi & 0xff;
11560                         dev->dev_addr[0] = (hi >> 8) & 0xff;
11561                 }
11562         }
11563
11564         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
11565 #ifdef CONFIG_SPARC64
11566                 if (!tg3_get_default_macaddr_sparc(tp))
11567                         return 0;
11568 #endif
11569                 return -EINVAL;
11570         }
11571         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
11572         return 0;
11573 }
11574
11575 #define BOUNDARY_SINGLE_CACHELINE       1
11576 #define BOUNDARY_MULTI_CACHELINE        2
11577
11578 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11579 {
11580         int cacheline_size;
11581         u8 byte;
11582         int goal;
11583
11584         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11585         if (byte == 0)
11586                 cacheline_size = 1024;
11587         else
11588                 cacheline_size = (int) byte * 4;
11589
11590         /* On 5703 and later chips, the boundary bits have no
11591          * effect.
11592          */
11593         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11594             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11595             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11596                 goto out;
11597
11598 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11599         goal = BOUNDARY_MULTI_CACHELINE;
11600 #else
11601 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11602         goal = BOUNDARY_SINGLE_CACHELINE;
11603 #else
11604         goal = 0;
11605 #endif
11606 #endif
11607
11608         if (!goal)
11609                 goto out;
11610
11611         /* PCI controllers on most RISC systems tend to disconnect
11612          * when a device tries to burst across a cache-line boundary.
11613          * Therefore, letting tg3 do so just wastes PCI bandwidth.
11614          *
11615          * Unfortunately, for PCI-E there are only limited
11616          * write-side controls for this, and thus for reads
11617          * we will still get the disconnects.  We'll also waste
11618          * these PCI cycles for both read and write for chips
11619          * other than 5700 and 5701 which do not implement the
11620          * boundary bits.
11621          */
11622         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11623             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11624                 switch (cacheline_size) {
11625                 case 16:
11626                 case 32:
11627                 case 64:
11628                 case 128:
11629                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11630                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11631                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11632                         } else {
11633                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11634                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11635                         }
11636                         break;
11637
11638                 case 256:
11639                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11640                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11641                         break;
11642
11643                 default:
11644                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11645                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11646                         break;
11647                 };
11648         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11649                 switch (cacheline_size) {
11650                 case 16:
11651                 case 32:
11652                 case 64:
11653                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11654                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11655                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11656                                 break;
11657                         }
11658                         /* fallthrough */
11659                 case 128:
11660                 default:
11661                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11662                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11663                         break;
11664                 };
11665         } else {
11666                 switch (cacheline_size) {
11667                 case 16:
11668                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11669                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11670                                         DMA_RWCTRL_WRITE_BNDRY_16);
11671                                 break;
11672                         }
11673                         /* fallthrough */
11674                 case 32:
11675                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11676                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11677                                         DMA_RWCTRL_WRITE_BNDRY_32);
11678                                 break;
11679                         }
11680                         /* fallthrough */
11681                 case 64:
11682                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11683                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11684                                         DMA_RWCTRL_WRITE_BNDRY_64);
11685                                 break;
11686                         }
11687                         /* fallthrough */
11688                 case 128:
11689                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
11690                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11691                                         DMA_RWCTRL_WRITE_BNDRY_128);
11692                                 break;
11693                         }
11694                         /* fallthrough */
11695                 case 256:
11696                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
11697                                 DMA_RWCTRL_WRITE_BNDRY_256);
11698                         break;
11699                 case 512:
11700                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
11701                                 DMA_RWCTRL_WRITE_BNDRY_512);
11702                         break;
11703                 case 1024:
11704                 default:
11705                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11706                                 DMA_RWCTRL_WRITE_BNDRY_1024);
11707                         break;
11708                 };
11709         }
11710
11711 out:
11712         return val;
11713 }
11714
/* Run one host<->NIC DMA transfer through the chip's internal DMA
 * engines, bypassing the normal ring machinery.
 *
 * A single internal buffer descriptor covering @buf (@size bytes at
 * bus address @buf_dma) is written into NIC SRAM through the indirect
 * PCI memory window, then handed to the read-DMA queue when
 * @to_device != 0 (host memory -> NIC) or to the write-DMA queue when
 * @to_device == 0 (NIC -> host memory).
 *
 * Returns 0 when the completion FIFO echoes the descriptor address
 * within the polling window, -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the DMA completion FIFOs and status registers. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Point the descriptor at the host buffer; 0x2100 is the
	 * NIC-side SRAM mbuf address used for the transfer.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* Read-DMA path: completion queue 13, submission queue 2. */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		/* Write-DMA path: completion queue 16, submission queue 7. */
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * via the indirect memory window in PCI config space.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick the appropriate engine by enqueueing the descriptor. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the completion FIFO, 40 tries x 100us = 4ms max. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
11795
11796 #define TEST_BUFFER_SIZE        0x2000
11797
/* Tune and validate the host DMA configuration.
 *
 * Builds tp->dma_rwctrl (read/write watermarks, command codes and
 * burst boundaries) from the bus type and chip revision, programs it,
 * and then - on 5700/5701 only - runs a write/read DMA loopback over
 * an 8KB buffer to detect the write-reordering bug on those chips,
 * retrying with a 16-byte write boundary when corruption is seen.
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be
 * allocated, or the error from the DMA loopback.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Default PCI write/read command codes. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		/* Conventional PCI: watermarks chosen per chip family. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		/* PCI-X mode: per-chip workarounds and watermarks. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble (minimum DMA watermark). */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 exhibit the write DMA bug; skip the loopback
	 * test on everything else.
	 */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known pattern (word index). */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			/* Corruption: retry once with a 16-byte write
			 * boundary before declaring the device unusable.
			 */
			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
11984
11985 static void __devinit tg3_init_link_config(struct tg3 *tp)
11986 {
11987         tp->link_config.advertising =
11988                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11989                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11990                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11991                  ADVERTISED_Autoneg | ADVERTISED_MII);
11992         tp->link_config.speed = SPEED_INVALID;
11993         tp->link_config.duplex = DUPLEX_INVALID;
11994         tp->link_config.autoneg = AUTONEG_ENABLE;
11995         tp->link_config.active_speed = SPEED_INVALID;
11996         tp->link_config.active_duplex = DUPLEX_INVALID;
11997         tp->link_config.phy_is_low_power = 0;
11998         tp->link_config.orig_speed = SPEED_INVALID;
11999         tp->link_config.orig_duplex = DUPLEX_INVALID;
12000         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12001 }
12002
12003 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12004 {
12005         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12006                 tp->bufmgr_config.mbuf_read_dma_low_water =
12007                         DEFAULT_MB_RDMA_LOW_WATER_5705;
12008                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12009                         DEFAULT_MB_MACRX_LOW_WATER_5705;
12010                 tp->bufmgr_config.mbuf_high_water =
12011                         DEFAULT_MB_HIGH_WATER_5705;
12012                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12013                         tp->bufmgr_config.mbuf_mac_rx_low_water =
12014                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
12015                         tp->bufmgr_config.mbuf_high_water =
12016                                 DEFAULT_MB_HIGH_WATER_5906;
12017                 }
12018
12019                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12020                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12021                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12022                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12023                 tp->bufmgr_config.mbuf_high_water_jumbo =
12024                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12025         } else {
12026                 tp->bufmgr_config.mbuf_read_dma_low_water =
12027                         DEFAULT_MB_RDMA_LOW_WATER;
12028                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12029                         DEFAULT_MB_MACRX_LOW_WATER;
12030                 tp->bufmgr_config.mbuf_high_water =
12031                         DEFAULT_MB_HIGH_WATER;
12032
12033                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12034                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12035                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12036                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12037                 tp->bufmgr_config.mbuf_high_water_jumbo =
12038                         DEFAULT_MB_HIGH_WATER_JUMBO;
12039         }
12040
12041         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12042         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12043 }
12044
12045 static char * __devinit tg3_phy_string(struct tg3 *tp)
12046 {
12047         switch (tp->phy_id & PHY_ID_MASK) {
12048         case PHY_ID_BCM5400:    return "5400";
12049         case PHY_ID_BCM5401:    return "5401";
12050         case PHY_ID_BCM5411:    return "5411";
12051         case PHY_ID_BCM5701:    return "5701";
12052         case PHY_ID_BCM5703:    return "5703";
12053         case PHY_ID_BCM5704:    return "5704";
12054         case PHY_ID_BCM5705:    return "5705";
12055         case PHY_ID_BCM5750:    return "5750";
12056         case PHY_ID_BCM5752:    return "5752";
12057         case PHY_ID_BCM5714:    return "5714";
12058         case PHY_ID_BCM5780:    return "5780";
12059         case PHY_ID_BCM5755:    return "5755";
12060         case PHY_ID_BCM5787:    return "5787";
12061         case PHY_ID_BCM5784:    return "5784";
12062         case PHY_ID_BCM5756:    return "5722/5756";
12063         case PHY_ID_BCM5906:    return "5906";
12064         case PHY_ID_BCM5761:    return "5761";
12065         case PHY_ID_BCM8002:    return "8002/serdes";
12066         case 0:                 return "serdes";
12067         default:                return "unknown";
12068         };
12069 }
12070
12071 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12072 {
12073         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12074                 strcpy(str, "PCI Express");
12075                 return str;
12076         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12077                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12078
12079                 strcpy(str, "PCIX:");
12080
12081                 if ((clock_ctrl == 7) ||
12082                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12083                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12084                         strcat(str, "133MHz");
12085                 else if (clock_ctrl == 0)
12086                         strcat(str, "33MHz");
12087                 else if (clock_ctrl == 2)
12088                         strcat(str, "50MHz");
12089                 else if (clock_ctrl == 4)
12090                         strcat(str, "66MHz");
12091                 else if (clock_ctrl == 6)
12092                         strcat(str, "100MHz");
12093         } else {
12094                 strcpy(str, "PCI:");
12095                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12096                         strcat(str, "66MHz");
12097                 else
12098                         strcat(str, "33MHz");
12099         }
12100         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12101                 strcat(str, ":32-bit");
12102         else
12103                 strcat(str, ":64-bit");
12104         return str;
12105 }
12106
12107 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
12108 {
12109         struct pci_dev *peer;
12110         unsigned int func, devnr = tp->pdev->devfn & ~7;
12111
12112         for (func = 0; func < 8; func++) {
12113                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12114                 if (peer && peer != tp->pdev)
12115                         break;
12116                 pci_dev_put(peer);
12117         }
12118         /* 5704 can be configured in single-port mode, set peer to
12119          * tp->pdev in that case.
12120          */
12121         if (!peer) {
12122                 peer = tp->pdev;
12123                 return peer;
12124         }
12125
12126         /*
12127          * We don't need to keep the refcount elevated; there's no way
12128          * to remove one half of this device without removing the other
12129          */
12130         pci_dev_put(peer);
12131
12132         return peer;
12133 }
12134
12135 static void __devinit tg3_init_coal(struct tg3 *tp)
12136 {
12137         struct ethtool_coalesce *ec = &tp->coal;
12138
12139         memset(ec, 0, sizeof(*ec));
12140         ec->cmd = ETHTOOL_GCOALESCE;
12141         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12142         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12143         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12144         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12145         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12146         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12147         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12148         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12149         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12150
12151         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12152                                  HOSTCC_MODE_CLRTICK_TXBD)) {
12153                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12154                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12155                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12156                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12157         }
12158
12159         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12160                 ec->rx_coalesce_usecs_irq = 0;
12161                 ec->tx_coalesce_usecs_irq = 0;
12162                 ec->stats_block_coalesce_usecs = 0;
12163         }
12164 }
12165
12166 static int __devinit tg3_init_one(struct pci_dev *pdev,
12167                                   const struct pci_device_id *ent)
12168 {
12169         static int tg3_version_printed = 0;
12170         unsigned long tg3reg_base, tg3reg_len;
12171         struct net_device *dev;
12172         struct tg3 *tp;
12173         int i, err, pm_cap;
12174         char str[40];
12175         u64 dma_mask, persist_dma_mask;
12176
12177         if (tg3_version_printed++ == 0)
12178                 printk(KERN_INFO "%s", version);
12179
12180         err = pci_enable_device(pdev);
12181         if (err) {
12182                 printk(KERN_ERR PFX "Cannot enable PCI device, "
12183                        "aborting.\n");
12184                 return err;
12185         }
12186
12187         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12188                 printk(KERN_ERR PFX "Cannot find proper PCI device "
12189                        "base address, aborting.\n");
12190                 err = -ENODEV;
12191                 goto err_out_disable_pdev;
12192         }
12193
12194         err = pci_request_regions(pdev, DRV_MODULE_NAME);
12195         if (err) {
12196                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
12197                        "aborting.\n");
12198                 goto err_out_disable_pdev;
12199         }
12200
12201         pci_set_master(pdev);
12202
12203         /* Find power-management capability. */
12204         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12205         if (pm_cap == 0) {
12206                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
12207                        "aborting.\n");
12208                 err = -EIO;
12209                 goto err_out_free_res;
12210         }
12211
12212         tg3reg_base = pci_resource_start(pdev, 0);
12213         tg3reg_len = pci_resource_len(pdev, 0);
12214
12215         dev = alloc_etherdev(sizeof(*tp));
12216         if (!dev) {
12217                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
12218                 err = -ENOMEM;
12219                 goto err_out_free_res;
12220         }
12221
12222         SET_NETDEV_DEV(dev, &pdev->dev);
12223
12224 #if TG3_VLAN_TAG_USED
12225         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
12226         dev->vlan_rx_register = tg3_vlan_rx_register;
12227 #endif
12228
12229         tp = netdev_priv(dev);
12230         tp->pdev = pdev;
12231         tp->dev = dev;
12232         tp->pm_cap = pm_cap;
12233         tp->mac_mode = TG3_DEF_MAC_MODE;
12234         tp->rx_mode = TG3_DEF_RX_MODE;
12235         tp->tx_mode = TG3_DEF_TX_MODE;
12236         tp->mi_mode = MAC_MI_MODE_BASE;
12237         if (tg3_debug > 0)
12238                 tp->msg_enable = tg3_debug;
12239         else
12240                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
12241
12242         /* The word/byte swap controls here control register access byte
12243          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
12244          * setting below.
12245          */
12246         tp->misc_host_ctrl =
12247                 MISC_HOST_CTRL_MASK_PCI_INT |
12248                 MISC_HOST_CTRL_WORD_SWAP |
12249                 MISC_HOST_CTRL_INDIR_ACCESS |
12250                 MISC_HOST_CTRL_PCISTATE_RW;
12251
12252         /* The NONFRM (non-frame) byte/word swap controls take effect
12253          * on descriptor entries, anything which isn't packet data.
12254          *
12255          * The StrongARM chips on the board (one for tx, one for rx)
12256          * are running in big-endian mode.
12257          */
12258         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
12259                         GRC_MODE_WSWAP_NONFRM_DATA);
12260 #ifdef __BIG_ENDIAN
12261         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
12262 #endif
12263         spin_lock_init(&tp->lock);
12264         spin_lock_init(&tp->indirect_lock);
12265         INIT_WORK(&tp->reset_task, tg3_reset_task);
12266
12267         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
12268         if (!tp->regs) {
12269                 printk(KERN_ERR PFX "Cannot map device registers, "
12270                        "aborting.\n");
12271                 err = -ENOMEM;
12272                 goto err_out_free_dev;
12273         }
12274
12275         tg3_init_link_config(tp);
12276
12277         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
12278         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
12279         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
12280
12281         dev->open = tg3_open;
12282         dev->stop = tg3_close;
12283         dev->get_stats = tg3_get_stats;
12284         dev->set_multicast_list = tg3_set_rx_mode;
12285         dev->set_mac_address = tg3_set_mac_addr;
12286         dev->do_ioctl = tg3_ioctl;
12287         dev->tx_timeout = tg3_tx_timeout;
12288         netif_napi_add(dev, &tp->napi, tg3_poll, 64);
12289         dev->ethtool_ops = &tg3_ethtool_ops;
12290         dev->watchdog_timeo = TG3_TX_TIMEOUT;
12291         dev->change_mtu = tg3_change_mtu;
12292         dev->irq = pdev->irq;
12293 #ifdef CONFIG_NET_POLL_CONTROLLER
12294         dev->poll_controller = tg3_poll_controller;
12295 #endif
12296
12297         err = tg3_get_invariants(tp);
12298         if (err) {
12299                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
12300                        "aborting.\n");
12301                 goto err_out_iounmap;
12302         }
12303
12304         /* The EPB bridge inside 5714, 5715, and 5780 and any
12305          * device behind the EPB cannot support DMA addresses > 40-bit.
12306          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
12307          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
12308          * do DMA address check in tg3_start_xmit().
12309          */
12310         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
12311                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
12312         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
12313                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
12314 #ifdef CONFIG_HIGHMEM
12315                 dma_mask = DMA_64BIT_MASK;
12316 #endif
12317         } else
12318                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
12319
12320         /* Configure DMA attributes. */
12321         if (dma_mask > DMA_32BIT_MASK) {
12322                 err = pci_set_dma_mask(pdev, dma_mask);
12323                 if (!err) {
12324                         dev->features |= NETIF_F_HIGHDMA;
12325                         err = pci_set_consistent_dma_mask(pdev,
12326                                                           persist_dma_mask);
12327                         if (err < 0) {
12328                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
12329                                        "DMA for consistent allocations\n");
12330                                 goto err_out_iounmap;
12331                         }
12332                 }
12333         }
12334         if (err || dma_mask == DMA_32BIT_MASK) {
12335                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
12336                 if (err) {
12337                         printk(KERN_ERR PFX "No usable DMA configuration, "
12338                                "aborting.\n");
12339                         goto err_out_iounmap;
12340                 }
12341         }
12342
12343         tg3_init_bufmgr_config(tp);
12344
12345         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12346                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
12347         }
12348         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12349             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12350             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
12351             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12352             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
12353                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
12354         } else {
12355                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
12356         }
12357
12358         /* TSO is on by default on chips that support hardware TSO.
12359          * Firmware TSO on older chips gives lower performance, so it
12360          * is off by default, but can be enabled using ethtool.
12361          */
12362         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12363                 dev->features |= NETIF_F_TSO;
12364                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12365                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
12366                         dev->features |= NETIF_F_TSO6;
12367                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12368                         dev->features |= NETIF_F_TSO_ECN;
12369         }
12370
12371
12372         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
12373             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
12374             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
12375                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
12376                 tp->rx_pending = 63;
12377         }
12378
12379         err = tg3_get_device_address(tp);
12380         if (err) {
12381                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
12382                        "aborting.\n");
12383                 goto err_out_iounmap;
12384         }
12385
12386         /*
12387          * Reset chip in case UNDI or EFI driver did not shutdown
12388          * DMA self test will enable WDMAC and we'll see (spurious)
12389          * pending DMA on the PCI bus at that point.
12390          */
12391         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
12392             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
12393                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
12394                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12395         }
12396
12397         err = tg3_test_dma(tp);
12398         if (err) {
12399                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
12400                 goto err_out_iounmap;
12401         }
12402
12403         /* Tigon3 can do ipv4 only... and some chips have buggy
12404          * checksumming.
12405          */
12406         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
12407                 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12408                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12409                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12410                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12411                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12412                         dev->features |= NETIF_F_IPV6_CSUM;
12413
12414                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12415         } else
12416                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
12417
12418         /* flow control autonegotiation is default behavior */
12419         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
12420
12421         tg3_init_coal(tp);
12422
12423         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12424                 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12425                         printk(KERN_ERR PFX "Cannot find proper PCI device "
12426                                "base address for APE, aborting.\n");
12427                         err = -ENODEV;
12428                         goto err_out_iounmap;
12429                 }
12430
12431                 tg3reg_base = pci_resource_start(pdev, 2);
12432                 tg3reg_len = pci_resource_len(pdev, 2);
12433
12434                 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
12435                 if (tp->aperegs == 0UL) {
12436                         printk(KERN_ERR PFX "Cannot map APE registers, "
12437                                "aborting.\n");
12438                         err = -ENOMEM;
12439                         goto err_out_iounmap;
12440                 }
12441
12442                 tg3_ape_lock_init(tp);
12443         }
12444
12445         pci_set_drvdata(pdev, dev);
12446
12447         err = register_netdev(dev);
12448         if (err) {
12449                 printk(KERN_ERR PFX "Cannot register net device, "
12450                        "aborting.\n");
12451                 goto err_out_apeunmap;
12452         }
12453
12454         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
12455                dev->name,
12456                tp->board_part_number,
12457                tp->pci_chip_rev_id,
12458                tg3_phy_string(tp),
12459                tg3_bus_string(tp, str),
12460                ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
12461                 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
12462                  "10/100/1000Base-T")));
12463
12464         for (i = 0; i < 6; i++)
12465                 printk("%2.2x%c", dev->dev_addr[i],
12466                        i == 5 ? '\n' : ':');
12467
12468         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
12469                "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
12470                dev->name,
12471                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
12472                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
12473                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
12474                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
12475                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
12476                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
12477         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
12478                dev->name, tp->dma_rwctrl,
12479                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
12480                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
12481
12482         return 0;
12483
12484 err_out_apeunmap:
12485         if (tp->aperegs) {
12486                 iounmap(tp->aperegs);
12487                 tp->aperegs = NULL;
12488         }
12489
12490 err_out_iounmap:
12491         if (tp->regs) {
12492                 iounmap(tp->regs);
12493                 tp->regs = NULL;
12494         }
12495
12496 err_out_free_dev:
12497         free_netdev(dev);
12498
12499 err_out_free_res:
12500         pci_release_regions(pdev);
12501
12502 err_out_disable_pdev:
12503         pci_disable_device(pdev);
12504         pci_set_drvdata(pdev, NULL);
12505         return err;
12506 }
12507
12508 static void __devexit tg3_remove_one(struct pci_dev *pdev)
12509 {
12510         struct net_device *dev = pci_get_drvdata(pdev);
12511
12512         if (dev) {
12513                 struct tg3 *tp = netdev_priv(dev);
12514
12515                 flush_scheduled_work();
12516                 unregister_netdev(dev);
12517                 if (tp->aperegs) {
12518                         iounmap(tp->aperegs);
12519                         tp->aperegs = NULL;
12520                 }
12521                 if (tp->regs) {
12522                         iounmap(tp->regs);
12523                         tp->regs = NULL;
12524                 }
12525                 free_netdev(dev);
12526                 pci_release_regions(pdev);
12527                 pci_disable_device(pdev);
12528                 pci_set_drvdata(pdev, NULL);
12529         }
12530 }
12531
/* Power-management suspend hook: quiesce the device, halt the chip, and
 * drop it into the power state chosen for @state.  If the power transition
 * fails, the hardware is restarted so the interface stays usable, and the
 * error is propagated to the PM core.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	/* Make sure no deferred reset/link work races with the shutdown. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Halt the chip and clear INIT_COMPLETE so nothing touches the
	 * hardware again until a full re-init on resume.
	 */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: bring the device back up so it
		 * remains usable, but still report the failure.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
12583
/* Power-management resume hook: restore PCI config state, bring the chip
 * back to D0, re-initialize the hardware, and restart the interface if it
 * was running at suspend time.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	/* Hardware bug - MSI won't work if INTX disabled. */
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		pci_intx(tp->pdev, 1);

	netif_device_attach(dev);

	/* Full hardware re-init under the device lock; mark INIT_COMPLETE
	 * first so the restart path treats this as a normal bring-up.
	 */
	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}
12623
12624 static struct pci_driver tg3_driver = {
12625         .name           = DRV_MODULE_NAME,
12626         .id_table       = tg3_pci_tbl,
12627         .probe          = tg3_init_one,
12628         .remove         = __devexit_p(tg3_remove_one),
12629         .suspend        = tg3_suspend,
12630         .resume         = tg3_resume
12631 };
12632
/* Module entry point: register the PCI driver; probing of matching
 * devices happens via tg3_init_one().
 */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
12637
/* Module exit point: unregister the PCI driver, which invokes
 * tg3_remove_one() for every bound device.
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
12642
12643 module_init(tg3_init);
12644 module_exit(tg3_cleanup);