 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *	IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define FORCEDETH_VERSION "0.63"
#define DRV_NAME "forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#if 0
#define dprintk printk
#else
#define dprintk(x...) do { } while (0)
#endif

#define TX_WORK_PER_LOOP 64
#define RX_WORK_PER_LOOP 64
#define DEV_NEED_TIMERIRQ 0x000001 /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER 0x000002 /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC 0x000004 /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA 0x000008 /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM 0x000010 /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN 0x000020 /* device supports vlan tagging and stripping */
#define DEV_HAS_MSI 0x000040 /* device supports MSI */
#define DEV_HAS_MSI_X 0x000080 /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL 0x000100 /* device supports power savings */
#define DEV_HAS_STATISTICS_V1 0x000200 /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2 0x000600 /* device supports hw statistics version 2 */
#define DEV_HAS_STATISTICS_V3 0x000e00 /* device supports hw statistics version 3 */
#define DEV_HAS_TEST_EXTENDED 0x001000 /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT 0x002000 /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR 0x004000 /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX 0x008000 /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1 0x010000 /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2 0x020000 /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3 0x040000 /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT 0x080000 /* device needs to limit tx */
#define DEV_HAS_GEAR_MODE 0x100000 /* device supports gear mode */
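/*
 * Illustrative sketch (not part of this file): the DEV_* capability flags
 * above are OR'd into the .driver_data field of the driver's
 * struct pci_device_id table, one entry per chipset, and read back at probe
 * time. A minimal, hypothetical entry for a board that needs the timer-IRQ
 * workaround and link polling could look like:
 *
 *	static const struct pci_device_id example_ids[] = {
 *		{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
 *		  .driver_data = DEV_NEED_TIMERIRQ | DEV_NEED_LINKTIMER, },
 *		{ 0, },
 *	};
 */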
enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT 0x040
#define NVREG_IRQSTAT_MASK 0x83ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR 0x0001
#define NVREG_IRQ_RX 0x0002
#define NVREG_IRQ_RX_NOBUF 0x0004
#define NVREG_IRQ_TX_ERR 0x0008
#define NVREG_IRQ_TX_OK 0x0010
#define NVREG_IRQ_TIMER 0x0020
#define NVREG_IRQ_LINK 0x0040
#define NVREG_IRQ_RX_FORCED 0x0080
#define NVREG_IRQ_TX_FORCED 0x0100
#define NVREG_IRQ_RECOVER_ERROR 0x8200
#define NVREG_IRQMASK_THROUGHPUT 0x00df
#define NVREG_IRQMASK_CPU 0x0060
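/*
 * For reference: NVREG_IRQMASK_THROUGHPUT (0x00df) decodes to
 * RX_ERROR|RX|RX_NOBUF|TX_ERR|TX_OK|LINK|RX_FORCED, i.e. per-packet
 * interrupts with the timer bit left out, while NVREG_IRQMASK_CPU (0x0060)
 * is just TIMER|LINK, so rx/tx work is driven off the timer instead.
 */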
#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL 3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT 970 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU 13
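/*
 * Worked example of the formula above (value = usec * 100 / 2^10):
 * inverting it gives usec = value * 1024 / 100, so the throughput default
 * of 970 is roughly 9.9 ms between timer interrupts and the cpu-mode
 * default of 13 is roughly 133 us.
 */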
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
#define NVREG_MISC1_PAUSE_TX 0x01
#define NVREG_MISC1_HD 0x02
#define NVREG_MISC1_FORCE 0x3b0f3c

	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT 0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START 0x01
#define NVREG_XMITCTL_MGMT_ST 0x40000000
#define NVREG_XMITCTL_SYNC_MASK 0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY 0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000
#define NVREG_XMITCTL_HOST_LOADED 0x00004000
#define NVREG_XMITCTL_TX_PATH_EN 0x01000000
#define NVREG_XMITCTL_DATA_START 0x00100000
#define NVREG_XMITCTL_DATA_READY 0x00010000
#define NVREG_XMITCTL_DATA_ERROR 0x00020000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY 0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX 0x08
#define NVREG_PFF_ALWAYS 0x7F0000
#define NVREG_PFF_PROMISC 0x80
#define NVREG_PFF_MYADDR 0x20
#define NVREG_PFF_LOOPBACK 0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY 0x601
#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START 0x01
#define NVREG_RCVCTL_RX_PATH_EN 0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY 0x01

	NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000
#define NVREG_SLOTTIME_10_100_FULL 0x00007f00
#define NVREG_SLOTTIME_1000_FULL 0x0003ff00
#define NVREG_SLOTTIME_HALF 0x0000ff00
#define NVREG_SLOTTIME_DEFAULT 0x00007f00
#define NVREG_SLOTTIME_MASK 0x000000ff

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10 0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100 0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH 0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT 0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE 0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE 0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE 0xffff

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII 0x10000000
	NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT 0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK 0x000003ff
#define NVREG_BKOFFCTRL_SELECT 24
#define NVREG_BKOFFCTRL_GEAR 12

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10 1000
#define NVREG_LINKSPEED_100 100
#define NVREG_LINKSPEED_1000 50
#define NVREG_LINKSPEED_MASK (0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31 (1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT 0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000
#define NVREG_TX_WM_DESC2_3_1000 0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK 0x0001
#define NVREG_TXRXCTL_BIT1 0x0002
#define NVREG_TXRXCTL_BIT2 0x0004
#define NVREG_TXRXCTL_IDLE 0x0008
#define NVREG_TXRXCTL_RESET 0x0010
#define NVREG_TXRXCTL_RXCHECK 0x0400
#define NVREG_TXRXCTL_DESC_1 0
#define NVREG_TXRXCTL_DESC_2 0x002100
#define NVREG_TXRXCTL_DESC_3 0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS 0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE 0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880
	NvRegTxPauseFrameLimit = 0x174,
#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE 0x00010000
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008
#define NVREG_MIISTAT_MASK_RW 0x0007
#define NVREG_MIISTAT_MASK_ALL 0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE 0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START 0x02
#define NVREG_ADAPTCTL_LINKUP 0x04
#define NVREG_ADAPTCTL_PHYVALID 0x40000
#define NVREG_ADAPTCTL_RUNNING 0x100000
#define NVREG_ADAPTCTL_PHYSHIFT 24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8 (1<<8)
#define NVREG_MIIDELAY 5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE 0x08000
#define NVREG_MIICTL_WRITE 0x00400
#define NVREG_MIICTL_ADDRSHIFT 5
	NvRegMIIData = 0x194,
	NvRegTxUnicast = 0x1a0,
	NvRegTxMulticast = 0x1a4,
	NvRegTxBroadcast = 0x1a8,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL 0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT 16
#define NVREG_WAKEUPFLAGS_D3SHIFT 12
#define NVREG_WAKEUPFLAGS_D2SHIFT 8
#define NVREG_WAKEUPFLAGS_D1SHIFT 4
#define NVREG_WAKEUPFLAGS_D0SHIFT 0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04
#define NVREG_WAKEUPFLAGS_ENABLE 0x1111

	NvRegMgmtUnitGetVersion = 0x204,
#define NVREG_MGMTUNITGETVERSION 0x01
	NvRegMgmtUnitVersion = 0x208,
#define NVREG_MGMTUNITVERSION 0x08
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP (1<<30)
#define NVREG_POWERCAP_D2SUPP (1<<26)
#define NVREG_POWERCAP_D1SUPP (1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP 0x8000
#define NVREG_POWERSTATE_VALID 0x0100
#define NVREG_POWERSTATE_MASK 0x0003
#define NVREG_POWERSTATE_D0 0x0000
#define NVREG_POWERSTATE_D1 0x0001
#define NVREG_POWERSTATE_D2 0x0002
#define NVREG_POWERSTATE_D3 0x0003
	NvRegMgmtUnitControl = 0x278,
#define NVREG_MGMTUNITCONTROL_INUSE 0x20000

	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxFrame = 0x2d8,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE 0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001
#define NVREG_POWERSTATE2_PHY_RESET 0x0004
};

/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc* orig;
	struct ring_desc_ex* ex;
};

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
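/*
 * For reference: LEN_MASK_V1 works out to 0x0000ffff and LEN_MASK_V2 to
 * 0x00003fff, i.e. the low bits that remain once the flag bits above are
 * masked away from the 32-bit flaglen word.
 */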
#define NV_TX_LASTPACKET (1<<16)
#define NV_TX_RETRYERROR (1<<19)
#define NV_TX_RETRYCOUNT_MASK (0xF<<20)
#define NV_TX_FORCED_INTERRUPT (1<<24)
#define NV_TX_DEFERRED (1<<26)
#define NV_TX_CARRIERLOST (1<<27)
#define NV_TX_LATECOLLISION (1<<28)
#define NV_TX_UNDERFLOW (1<<29)
#define NV_TX_ERROR (1<<30)
#define NV_TX_VALID (1<<31)

#define NV_TX2_LASTPACKET (1<<29)
#define NV_TX2_RETRYERROR (1<<18)
#define NV_TX2_RETRYCOUNT_MASK (0xF<<19)
#define NV_TX2_FORCED_INTERRUPT (1<<30)
#define NV_TX2_DEFERRED (1<<25)
#define NV_TX2_CARRIERLOST (1<<26)
#define NV_TX2_LATECOLLISION (1<<27)
#define NV_TX2_UNDERFLOW (1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR (1<<30)
#define NV_TX2_VALID (1<<31)
#define NV_TX2_TSO (1<<28)
#define NV_TX2_TSO_SHIFT 14
#define NV_TX2_TSO_MAX_SHIFT 14
#define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3 (1<<27)
#define NV_TX2_CHECKSUM_L4 (1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID (1<<16)
#define NV_RX_MISSEDFRAME (1<<17)
#define NV_RX_SUBSTRACT1 (1<<18)
#define NV_RX_ERROR1 (1<<23)
#define NV_RX_ERROR2 (1<<24)
#define NV_RX_ERROR3 (1<<25)
#define NV_RX_ERROR4 (1<<26)
#define NV_RX_CRCERR (1<<27)
#define NV_RX_OVERFLOW (1<<28)
#define NV_RX_FRAMINGERR (1<<29)
#define NV_RX_ERROR (1<<30)
#define NV_RX_AVAIL (1<<31)
#define NV_RX_ERROR_MASK (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)

#define NV_RX2_CHECKSUMMASK (0x1C000000)
#define NV_RX2_CHECKSUM_IP (0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP (0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP (0x18000000)
#define NV_RX2_DESCRIPTORVALID (1<<29)
#define NV_RX2_SUBSTRACT1 (1<<25)
#define NV_RX2_ERROR1 (1<<18)
#define NV_RX2_ERROR2 (1<<19)
#define NV_RX2_ERROR3 (1<<20)
#define NV_RX2_ERROR4 (1<<21)
#define NV_RX2_CRCERR (1<<22)
#define NV_RX2_OVERFLOW (1<<23)
#define NV_RX2_FRAMINGERR (1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR (1<<30)
#define NV_RX2_AVAIL (1<<31)
#define NV_RX2_ERROR_MASK (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)

/* Miscellaneous hardware-related defines: */
#define NV_PCI_REGSZ_VER1 0x270
#define NV_PCI_REGSZ_VER2 0x2d4
#define NV_PCI_REGSZ_VER3 0x604
#define NV_PCI_REGSZ_MAX 0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY 4
#define NV_TXSTOP_DELAY1 10
#define NV_TXSTOP_DELAY1MAX 500000
#define NV_TXSTOP_DELAY2 100
#define NV_RXSTOP_DELAY1 10
#define NV_RXSTOP_DELAY1MAX 500000
#define NV_RXSTOP_DELAY2 100
#define NV_SETUP5_DELAY 5
#define NV_SETUP5_DELAYMAX 50000
#define NV_POWERUP_DELAY 5
#define NV_POWERUP_DELAYMAX 5000
#define NV_MIIBUSY_DELAY 50
#define NV_MIIPHY_DELAY 10
#define NV_MIIPHY_DELAYMAX 10000
#define NV_MAC_RESET_DELAY 64

#define NV_WAKEUPPATTERNS 5
#define NV_WAKEUPMASKENTRIES 4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO (5*HZ)

#define RX_RING_DEFAULT 128
#define TX_RING_DEFAULT 256
#define RX_RING_MIN 128
#define TX_RING_MIN 64
#define RING_MAX_DESC_VER_1 1024
#define RING_MAX_DESC_VER_2_3 16384

/* rx/tx mac addr + type + vlan + align + slack */
#define NV_RX_HEADERS (64)
/* even more slack. */
#define NV_RX_ALLOC_PAD (64)

/* maximum mtu size */
#define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */
#define NV_PKTLIMIT_2 9100 /* Actual limit according to NVIDIA: 9202 */

#define OOM_REFILL (1+HZ/20)
#define POLL_WAIT (1+HZ/100)
#define LINK_TIMEOUT (3*HZ)
#define STATS_INTERVAL (10*HZ)

/*
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
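/*
 * Reader's note (inferred from the code below, not from vendor docs):
 * DESC_VER_1 and DESC_VER_2 use struct ring_desc with a single 32-bit
 * buffer address, while DESC_VER_3 uses the larger struct ring_desc_ex
 * with split high/low address words; nv_optimized() below keys off
 * exactly this distinction.
 */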
#define PHY_OUI_MARVELL 0x5043
#define PHY_OUI_CICADA 0x03f1
#define PHY_OUI_VITESSE 0x01c1
#define PHY_OUI_REALTEK 0x0732
#define PHY_OUI_REALTEK2 0x0020
#define PHYID1_OUI_MASK 0x03ff
#define PHYID1_OUI_SHFT 6
#define PHYID2_OUI_MASK 0xfc00
#define PHYID2_OUI_SHFT 10
#define PHYID2_MODEL_MASK 0x03f0
#define PHY_MODEL_REALTEK_8211 0x0110
#define PHY_REV_MASK 0x0001
#define PHY_REV_REALTEK_8211B 0x0000
#define PHY_REV_REALTEK_8211C 0x0001
#define PHY_MODEL_REALTEK_8201 0x0200
#define PHY_MODEL_MARVELL_E3016 0x0220
#define PHY_MARVELL_E3016_INITMASK 0x0300
#define PHY_CICADA_INIT1 0x0f000
#define PHY_CICADA_INIT2 0x0e00
#define PHY_CICADA_INIT3 0x01000
#define PHY_CICADA_INIT4 0x0200
#define PHY_CICADA_INIT5 0x0004
#define PHY_CICADA_INIT6 0x02000
#define PHY_VITESSE_INIT_REG1 0x1f
#define PHY_VITESSE_INIT_REG2 0x10
#define PHY_VITESSE_INIT_REG3 0x11
#define PHY_VITESSE_INIT_REG4 0x12
#define PHY_VITESSE_INIT_MSK1 0xc
#define PHY_VITESSE_INIT_MSK2 0x0180
#define PHY_VITESSE_INIT1 0x52b5
#define PHY_VITESSE_INIT2 0xaf8a
#define PHY_VITESSE_INIT3 0x8
#define PHY_VITESSE_INIT4 0x8f8a
#define PHY_VITESSE_INIT5 0xaf86
#define PHY_VITESSE_INIT6 0x8f86
#define PHY_VITESSE_INIT7 0xaf82
#define PHY_VITESSE_INIT8 0x0100
#define PHY_VITESSE_INIT9 0x8f82
#define PHY_VITESSE_INIT10 0x0
#define PHY_REALTEK_INIT_REG1 0x1f
#define PHY_REALTEK_INIT_REG2 0x19
#define PHY_REALTEK_INIT_REG3 0x13
#define PHY_REALTEK_INIT_REG4 0x14
#define PHY_REALTEK_INIT_REG5 0x18
#define PHY_REALTEK_INIT_REG6 0x11
#define PHY_REALTEK_INIT_REG7 0x01
#define PHY_REALTEK_INIT1 0x0000
#define PHY_REALTEK_INIT2 0x8e00
#define PHY_REALTEK_INIT3 0x0001
#define PHY_REALTEK_INIT4 0xad17
#define PHY_REALTEK_INIT5 0xfb54
#define PHY_REALTEK_INIT6 0xf5c7
#define PHY_REALTEK_INIT7 0x1000
#define PHY_REALTEK_INIT8 0x0003
#define PHY_REALTEK_INIT9 0x0008
#define PHY_REALTEK_INIT10 0x0005
#define PHY_REALTEK_INIT11 0x0200
#define PHY_REALTEK_INIT_MSK1 0x0003

#define PHY_GIGABIT 0x0100

#define PHY_TIMEOUT 0x1
#define PHY_ERROR 0x2

#define PHY_HALF 0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE 0x0004
#define NV_PAUSEFRAME_TX_ENABLE 0x0008
#define NV_PAUSEFRAME_RX_REQ 0x0010
#define NV_PAUSEFRAME_TX_REQ 0x0020
#define NV_PAUSEFRAME_AUTONEG 0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS 8
#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_CAPABLE 0x0010
#define NV_MSI_X_CAPABLE 0x0020
#define NV_MSI_ENABLED 0x0040
#define NV_MSI_X_ENABLED 0x0080

#define NV_MSI_X_VECTOR_ALL 0x0
#define NV_MSI_X_VECTOR_RX 0x0
#define NV_MSI_X_VECTOR_TX 0x1
#define NV_MSI_X_VECTOR_OTHER 0x2

#define NV_MSI_PRIV_OFFSET 0x68
#define NV_MSI_PRIV_VALUE 0xffffffff

#define NV_RESTART_TX 0x1
#define NV_RESTART_RX 0x2

#define NV_TX_LIMIT_COUNT 16

struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_late_collision" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */

	/* version 3 stats */
};

struct nv_ethtool_stats {
	u64 tx_late_collision;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 rx_late_collision;
	u64 rx_frame_too_long;
	u64 rx_frame_align_error;

	/* version 2 stats */

	/* version 3 stats */
};

#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
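/*
 * Sanity check on the arithmetic above: the three version-3 counters
 * (tx_unicast/tx_multicast/tx_broadcast, see NvRegTx* at 0x1a0..0x1a8)
 * account for the "- 3", and the six version-2 counters read in
 * nv_get_hw_stats() below account for the "- 6", so each COUNT value
 * matches the fields a given chip generation actually implements.
 */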
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link (online/offline)" },
	{ "register (offline) " },
	{ "interrupt (offline) " },
	{ "loopback (offline) " }
};

struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
};

struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len;
	struct ring_desc_ex *first_tx_desc;
	struct nv_skb_map *next_tx_ctx;
};

/*
 * SMP locking:
 * All hardware access under netdev_priv(dev)->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *   by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *   needs netdev_priv(dev)->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */

/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct nv_ethtool_stats estats;

	unsigned int phy_oui;
	unsigned int phy_model;
	unsigned int phy_rev;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	unsigned long link_timeout;

	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;

	u32 tx_pkts_in_progress;
	struct nv_skb_map *tx_change_owner;
	struct nv_skb_map *tx_end_flip;

	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* power saved state */
	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];

	/* for different msi-x irq type */
	char name_rx[IFNAMSIZ + 3];	/* -rx */
	char name_tx[IFNAMSIZ + 3];	/* -tx */
	char name_other[IFNAMSIZ + 6];	/* -other */
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 15;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU
};
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * This value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;

/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {
	NV_CROSSOVER_DETECTION_DISABLED,
	NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}

static bool nv_optimized(struct fe_priv *np)
{
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;
	return true;
}

static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}

#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}
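/*
 * Side note on dma_high(): the double shift ">>31>>1" is equivalent to
 * ">>32" but stays well-defined when dma_addr_t is a 32-bit type, where a
 * single shift by 32 would be undefined behaviour in C. E.g. for a 64-bit
 * addr of 0x0000000123456789 it yields 0x00000001, and for any 32-bit addr
 * it yields 0.
 */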
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}

static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
}

static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}
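/*
 * Reader's note: using_multi_irqs() is false whenever the device runs on a
 * single interrupt - legacy INTx, plain MSI, or MSI-X with only one vector
 * allocated; only true MSI-X with separate rx/tx/other vectors takes the
 * multi-irq paths below.
 */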
static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}
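/*
 * Why nv_disable_hw_interrupts() writes the mask back in MSI-X mode: per
 * the XOR note above, re-writing the currently enabled bits toggles them
 * off, whereas in INTx/MSI mode the register is a plain mask and writing
 * 0 disables everything.
 */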
static void nv_napi_enable(struct net_device *dev)
{
#ifdef CONFIG_FORCEDETH_NAPI
	struct fe_priv *np = get_nvpriv(dev);

	napi_enable(&np->napi);
#endif
}

static void nv_napi_disable(struct net_device *dev)
{
#ifdef CONFIG_FORCEDETH_NAPI
	struct fe_priv *np = get_nvpriv(dev);

	napi_disable(&np->napi);
#endif
}

#define MII_READ (-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
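/* Note (inferred from the "value != MII_READ" checks below, not from vendor
 * docs): callers pass MII_READ (-1) as @value to request a read; any other
 * value is written to the PHY register. */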
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
		      NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
			dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
			dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
			dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
			dev->name, miireg, addr, retval);
	}

	return retval;
}

static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}

static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211C) {
			u32 powerstate = readl(base + NvRegPowerState2);

			/* need to perform hw phy reset */
			powerstate |= NVREG_POWERSTATE2_PHY_RESET;
			writel(powerstate, base + NvRegPowerState2);
			msleep(25);

			powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
			writel(powerstate, base + NvRegPowerState2);
			msleep(25);

			reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
			reg |= PHY_REALTEK_INIT9;
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
			if (!(reg & PHY_REALTEK_INIT11)) {
				reg |= PHY_REALTEK_INIT11;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
				phy_reserved |= PHY_REALTEK_INIT7;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
		}
	}

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8211 &&
	    np->phy_rev == PHY_REV_REALTEK_8211C) {
		/* start autoneg since we already performed hw reset above */
		mii_control |= BMCR_ANRESTART;
		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
			printk(KERN_INFO "%s: phy init failed\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else {
		/* reset the phy
		 * (certain phys need bmcr to be set up with reset)
		 */
		if (phy_reset(dev, mii_control)) {
			printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_CICADA_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_VITESSE) {
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
		phy_reserved |= PHY_VITESSE_INIT8;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			/* reset could have cleared these out, set them back */
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
			    np->device_id == PCI_DEVICE_ID_NVIDIA_NVENET_39) {
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
				phy_reserved |= PHY_REALTEK_INIT7;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
			if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
				phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
				phy_reserved |= PHY_REALTEK_INIT3;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
		}
	}

	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation, power down phy */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE | BMCR_PDOWN);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}

static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
		dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
		  NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
		  KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
		  NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
		  KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}

static void nv_start_rxtx(struct net_device *dev)
{
	nv_start_rx(dev);
	nv_start_tx(dev);
}

static void nv_stop_rxtx(struct net_device *dev)
{
	nv_stop_rx(dev);
	nv_stop_tx(dev);
}

static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 temp1, temp2, temp3;

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);

	/* save registers since they will be cleared on reset */
	temp1 = readl(base + NvRegMacAddrA);
	temp2 = readl(base + NvRegMacAddrB);
	temp3 = readl(base + NvRegTransmitPoll);

	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);

	/* restore saved registers */
	writel(temp1, base + NvRegMacAddrA);
	writel(temp2, base + NvRegMacAddrB);
	writel(temp3, base + NvRegTransmitPoll);

	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_get_hw_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
	}

	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
		np->estats.tx_unicast += readl(base + NvRegTxUnicast);
		np->estats.tx_multicast += readl(base + NvRegTxMulticast);
		np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
	}
}

/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* If the nic supports hw counters then retrieve latest values */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
		nv_get_hw_stats(dev);

		/* copy to net_device stats */
		dev->stats.tx_bytes = np->estats.tx_bytes;
		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
		dev->stats.rx_over_errors = np->estats.rx_over_errors;
		dev->stats.rx_errors = np->estats.rx_errors_total;
		dev->stats.tx_errors = np->estats.tx_errors_total;
	}

	return &dev->stats;
}

/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without available descriptors
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc* less_rx;

	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
		less_rx = np->last_rx.orig;
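	/*
	 * Note on the loop bound below: less_rx points at the descriptor just
	 * before get_rx (with ring wraparound), so put_rx can fill the ring
	 * only up to one slot short of the reader - keeping a completely full
	 * ring distinguishable from a completely empty one.
	 */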
1731 while (np->put_rx.orig != less_rx) {
1732 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1734 np->put_rx_ctx->skb = skb;
1735 np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
1738 PCI_DMA_FROMDEVICE);
1739 np->put_rx_ctx->dma_len = skb_tailroom(skb);
1740 np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
1742 np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
1743 if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
1744 np->put_rx.orig = np->first_rx.orig;
1745 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1746 np->put_rx_ctx = np->first_rx_ctx;
1754 static int nv_alloc_rx_optimized(struct net_device *dev)
1756 struct fe_priv *np = netdev_priv(dev);
1757 struct ring_desc_ex* less_rx;
1759 less_rx = np->get_rx.ex;
1760 if (less_rx-- == np->first_rx.ex)
1761 less_rx = np->last_rx.ex;
1763 while (np->put_rx.ex != less_rx) {
1764 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1766 np->put_rx_ctx->skb = skb;
1767 np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
1770 PCI_DMA_FROMDEVICE);
1771 np->put_rx_ctx->dma_len = skb_tailroom(skb);
1772 np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
1773 np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
1775 np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1776 if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
1777 np->put_rx.ex = np->first_rx.ex;
1778 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1779 np->put_rx_ctx = np->first_rx_ctx;
1787 /* If rx bufs are exhausted called after 50ms to attempt to refresh */
1788 #ifdef CONFIG_FORCEDETH_NAPI
1789 static void nv_do_rx_refill(unsigned long data)
1791 struct net_device *dev = (struct net_device *) data;
1792 struct fe_priv *np = netdev_priv(dev);
1794 /* Just reschedule NAPI rx processing */
1795 napi_schedule(&np->napi);
1798 static void nv_do_rx_refill(unsigned long data)
1800 struct net_device *dev = (struct net_device *) data;
1801 struct fe_priv *np = netdev_priv(dev);
1804 if (!using_multi_irqs(dev)) {
1805 if (np->msi_flags & NV_MSI_X_ENABLED)
1806 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1808 disable_irq(np->pci_dev->irq);
1810 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1812 if (!nv_optimized(np))
1813 retcode = nv_alloc_rx(dev);
1815 retcode = nv_alloc_rx_optimized(dev);
1817 spin_lock_irq(&np->lock);
1818 if (!np->in_shutdown)
1819 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
1820 spin_unlock_irq(&np->lock);
1822 if (!using_multi_irqs(dev)) {
1823 if (np->msi_flags & NV_MSI_X_ENABLED)
1824 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1826 enable_irq(np->pci_dev->irq);
1828 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1833 static void nv_init_rx(struct net_device *dev)
1835 struct fe_priv *np = netdev_priv(dev);
1838 np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
1840 if (!nv_optimized(np))
1841 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
1843 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
1844 np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
1845 np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
1847 for (i = 0; i < np->rx_ring_size; i++) {
1848 if (!nv_optimized(np)) {
1849 np->rx_ring.orig[i].flaglen = 0;
1850 np->rx_ring.orig[i].buf = 0;
1852 np->rx_ring.ex[i].flaglen = 0;
1853 np->rx_ring.ex[i].txvlan = 0;
1854 np->rx_ring.ex[i].bufhigh = 0;
1855 np->rx_ring.ex[i].buflow = 0;
1857 np->rx_skb[i].skb = NULL;
1858 np->rx_skb[i].dma = 0;
1862 static void nv_init_tx(struct net_device *dev)
1864 struct fe_priv *np = netdev_priv(dev);
1867 np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
1869 if (!nv_optimized(np))
1870 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
1872 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
1873 np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
1874 np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
1875 np->tx_pkts_in_progress = 0;
1876 np->tx_change_owner = NULL;
1877 np->tx_end_flip = NULL;
1879 for (i = 0; i < np->tx_ring_size; i++) {
1880 if (!nv_optimized(np)) {
1881 np->tx_ring.orig[i].flaglen = 0;
1882 np->tx_ring.orig[i].buf = 0;
1884 np->tx_ring.ex[i].flaglen = 0;
1885 np->tx_ring.ex[i].txvlan = 0;
1886 np->tx_ring.ex[i].bufhigh = 0;
1887 np->tx_ring.ex[i].buflow = 0;
1889 np->tx_skb[i].skb = NULL;
1890 np->tx_skb[i].dma = 0;
1891 np->tx_skb[i].dma_len = 0;
1892 np->tx_skb[i].first_tx_desc = NULL;
1893 np->tx_skb[i].next_tx_ctx = NULL;
1897 static int nv_init_ring(struct net_device *dev)
1899 struct fe_priv *np = netdev_priv(dev);
1904 if (!nv_optimized(np))
1905 return nv_alloc_rx(dev);
1907 return nv_alloc_rx_optimized(dev);
1910 static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb)
1912 struct fe_priv *np = netdev_priv(dev);
1915 pci_unmap_page(np->pci_dev, tx_skb->dma,
1921 dev_kfree_skb_any(tx_skb->skb);
1929 static void nv_drain_tx(struct net_device *dev)
1931 struct fe_priv *np = netdev_priv(dev);
1934 for (i = 0; i < np->tx_ring_size; i++) {
1935 if (!nv_optimized(np)) {
1936 np->tx_ring.orig[i].flaglen = 0;
1937 np->tx_ring.orig[i].buf = 0;
1939 np->tx_ring.ex[i].flaglen = 0;
1940 np->tx_ring.ex[i].txvlan = 0;
1941 np->tx_ring.ex[i].bufhigh = 0;
1942 np->tx_ring.ex[i].buflow = 0;
1944 if (nv_release_txskb(dev, &np->tx_skb[i]))
1945 dev->stats.tx_dropped++;
1946 np->tx_skb[i].dma = 0;
1947 np->tx_skb[i].dma_len = 0;
1948 np->tx_skb[i].first_tx_desc = NULL;
1949 np->tx_skb[i].next_tx_ctx = NULL;
1951 np->tx_pkts_in_progress = 0;
1952 np->tx_change_owner = NULL;
1953 np->tx_end_flip = NULL;
1956 static void nv_drain_rx(struct net_device *dev)
1958 struct fe_priv *np = netdev_priv(dev);
1961 for (i = 0; i < np->rx_ring_size; i++) {
1962 if (!nv_optimized(np)) {
1963 np->rx_ring.orig[i].flaglen = 0;
1964 np->rx_ring.orig[i].buf = 0;
1966 np->rx_ring.ex[i].flaglen = 0;
1967 np->rx_ring.ex[i].txvlan = 0;
1968 np->rx_ring.ex[i].bufhigh = 0;
1969 np->rx_ring.ex[i].buflow = 0;
1972 if (np->rx_skb[i].skb) {
1973 pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
1974 (skb_end_pointer(np->rx_skb[i].skb) -
1975 np->rx_skb[i].skb->data),
1976 PCI_DMA_FROMDEVICE);
1977 dev_kfree_skb(np->rx_skb[i].skb);
1978 np->rx_skb[i].skb = NULL;
1983 static void nv_drain_rxtx(struct net_device *dev)
1989 static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
1991 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
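/*
 * Aside (illustrative sketch, not part of the driver): the modular
 * arithmetic above, restated with plain indices. example_empty_slots is
 * a hypothetical name; assume a ring of size 8 for the worked numbers.
 */
static inline u32 example_empty_slots(u32 ring_size, int put, int get)
{
	/* (put - get) can be negative once put has wrapped past the ring
	 * end; adding ring_size before the modulo keeps it in range. */
	u32 used = (ring_size + (put - get)) % ring_size;

	return ring_size - used;	/* size 8, put 2, get 6 -> 4 free */
}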
1994 static void nv_legacybackoff_reseed(struct net_device *dev)
1996 u8 __iomem *base = get_hwbase(dev);
2001 reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
2002 get_random_bytes(&low, sizeof(low));
2003 reg |= low & NVREG_SLOTTIME_MASK;
2005 /* Need to stop tx before the change takes effect.
2006 * Caller has already acquired np->lock.
2008 tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
2012 writel(reg, base + NvRegSlotTime);
2018 /* Gear Backoff Seeds */
2019 #define BACKOFF_SEEDSET_ROWS 8
2020 #define BACKOFF_SEEDSET_LFSRS 15
2022 /* Known Good seed sets */
2023 static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2024 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2025 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
2026 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2027 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
2028 {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
2029 {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
2030 {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84},
2031 {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}};
2033 static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2034 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2035 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2036 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
2037 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2038 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2039 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2040 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2041 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}};
2043 static void nv_gear_backoff_reseed(struct net_device *dev)
2045 u8 __iomem *base = get_hwbase(dev);
2046 u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
2047 u32 temp, seedset, combinedSeed;
2050 /* Setup seed for the free-running LFSR */
2051 /* We fetch three 12-bit random values and swizzle their bits
2052 around to increase randomness */
2053 get_random_bytes(&miniseed1, sizeof(miniseed1));
2054 miniseed1 &= 0x0fff;
2058 get_random_bytes(&miniseed2, sizeof(miniseed2));
2059 miniseed2 &= 0x0fff;
2062 miniseed2_reversed =
2063 ((miniseed2 & 0xF00) >> 8) |
2064 (miniseed2 & 0x0F0) |
2065 ((miniseed2 & 0x00F) << 8);
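	/* Worked example (added for clarity): 0xABC becomes 0xCBA - the top
	 * and bottom nibbles of the 12-bit value swap, the middle one stays. */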
2067 get_random_bytes(&miniseed3, sizeof(miniseed3));
2068 miniseed3 &= 0x0fff;
2071 miniseed3_reversed =
2072 ((miniseed3 & 0xF00) >> 8) |
2073 (miniseed3 & 0x0F0) |
2074 ((miniseed3 & 0x00F) << 8);
2076 combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
2077 (miniseed2 ^ miniseed3_reversed);
2079 /* Seeds cannot be zero */
2080 if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
2081 combinedSeed |= 0x08;
2082 if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
2083 combinedSeed |= 0x8000;
2085 /* No need to disable tx here */
2086 temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
2087 temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
2088 temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
2089 writel(temp,base + NvRegBackOffControl);
2091 /* Setup seeds for all gear LFSRs. */
2092 get_random_bytes(&seedset, sizeof(seedset));
2093 seedset = seedset % BACKOFF_SEEDSET_ROWS;
2094 for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++)
2096 temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
2097 temp |= main_seedset[seedset][i-1] & 0x3ff;
2098 temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
2099 writel(temp, base + NvRegBackOffControl);
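/*
 * Aside (illustrative sketch, not part of the driver): how one
 * BackOffControl word is packed per gear LFSR in the loop above.
 * example_pack_backoff is a hypothetical name; the field positions
 * follow the NVREG_BKOFFCTRL_* usage in nv_gear_backoff_reseed.
 */
static inline u32 example_pack_backoff(u32 dflt, u32 lfsr, u32 select_shift,
					u32 gear_shift, u32 main_seed,
					u32 gear_seed)
{
	u32 v = dflt | (lfsr << select_shift);	/* select which LFSR to load */

	v |= main_seed & 0x3ff;			/* 10-bit main seed, low bits */
	v |= (gear_seed & 0x3ff) << gear_shift;	/* 10-bit gear seed field */
	return v;
}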
2104 * nv_start_xmit: dev->hard_start_xmit function
2105 * Called with netif_tx_lock held.
2107 static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2109 struct fe_priv *np = netdev_priv(dev);
2111 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
2112 unsigned int fragments = skb_shinfo(skb)->nr_frags;
2116 u32 size = skb->len-skb->data_len;
2117 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2119 struct ring_desc* put_tx;
2120 struct ring_desc* start_tx;
2121 struct ring_desc* prev_tx;
2122 struct nv_skb_map* prev_tx_ctx;
2123 unsigned long flags;
2125 /* add fragments to entries count */
2126 for (i = 0; i < fragments; i++) {
2127 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
2128 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2131 spin_lock_irqsave(&np->lock, flags);
2132 empty_slots = nv_get_empty_tx_slots(np);
2133 if (unlikely(empty_slots <= entries)) {
2134 netif_stop_queue(dev);
2136 spin_unlock_irqrestore(&np->lock, flags);
2137 return NETDEV_TX_BUSY;
2139 spin_unlock_irqrestore(&np->lock, flags);
2141 start_tx = put_tx = np->put_tx.orig;
2143 /* setup the header buffer */
2146 prev_tx_ctx = np->put_tx_ctx;
2147 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2148 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2150 np->put_tx_ctx->dma_len = bcnt;
2151 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2152 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2154 tx_flags = np->tx_flags;
2157 if (unlikely(put_tx++ == np->last_tx.orig))
2158 put_tx = np->first_tx.orig;
2159 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2160 np->put_tx_ctx = np->first_tx_ctx;
2163 /* setup the fragments */
2164 for (i = 0; i < fragments; i++) {
2165 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2166 u32 size = frag->size;
2171 prev_tx_ctx = np->put_tx_ctx;
2172 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2173 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
2175 np->put_tx_ctx->dma_len = bcnt;
2176 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2177 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2181 if (unlikely(put_tx++ == np->last_tx.orig))
2182 put_tx = np->first_tx.orig;
2183 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2184 np->put_tx_ctx = np->first_tx_ctx;
2188 /* set last fragment flag */
2189 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
2191 /* save skb in this slot's context area */
2192 prev_tx_ctx->skb = skb;
2194 if (skb_is_gso(skb))
2195 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2197 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2198 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2200 spin_lock_irqsave(&np->lock, flags);
2203 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2204 np->put_tx.orig = put_tx;
2206 spin_unlock_irqrestore(&np->lock, flags);
2208 dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
2209 dev->name, entries, tx_flags_extra);
2212 for (j=0; j<64; j++) {
2214 dprintk("\n%03x:", j);
2215 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2220 dev->trans_start = jiffies;
2221 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2222 return NETDEV_TX_OK;
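/*
 * Aside (illustrative sketch, not part of the driver): the descriptor
 * count computed by nv_start_xmit is a ceiling division by
 * NV_TX2_TSO_MAX_SIZE done with a shift plus a remainder test.
 * example_tx_entries is a hypothetical name; it assumes the usual
 * power-of-two relationship max_size == 1 << max_shift.
 */
static inline u32 example_tx_entries(u32 size, u32 max_shift, u32 max_size)
{
	/* e.g. size 20000 with a 16384-byte chunk: 1 full chunk plus a
	 * 3616-byte remainder -> 2 descriptors */
	return (size >> max_shift) + ((size & (max_size - 1)) ? 1 : 0);
}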
2225 static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
2227 struct fe_priv *np = netdev_priv(dev);
2230 unsigned int fragments = skb_shinfo(skb)->nr_frags;
2234 u32 size = skb->len-skb->data_len;
2235 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2237 struct ring_desc_ex* put_tx;
2238 struct ring_desc_ex* start_tx;
2239 struct ring_desc_ex* prev_tx;
2240 struct nv_skb_map* prev_tx_ctx;
2241 struct nv_skb_map* start_tx_ctx;
2242 unsigned long flags;
2244 /* add fragments to entries count */
2245 for (i = 0; i < fragments; i++) {
2246 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
2247 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2250 spin_lock_irqsave(&np->lock, flags);
2251 empty_slots = nv_get_empty_tx_slots(np);
2252 if (unlikely(empty_slots <= entries)) {
2253 netif_stop_queue(dev);
2255 spin_unlock_irqrestore(&np->lock, flags);
2256 return NETDEV_TX_BUSY;
2258 spin_unlock_irqrestore(&np->lock, flags);
2260 start_tx = put_tx = np->put_tx.ex;
2261 start_tx_ctx = np->put_tx_ctx;
2263 /* setup the header buffer */
2266 prev_tx_ctx = np->put_tx_ctx;
2267 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2268 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2270 np->put_tx_ctx->dma_len = bcnt;
2271 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2272 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2273 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2275 tx_flags = NV_TX2_VALID;
2278 if (unlikely(put_tx++ == np->last_tx.ex))
2279 put_tx = np->first_tx.ex;
2280 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2281 np->put_tx_ctx = np->first_tx_ctx;
2284 /* setup the fragments */
2285 for (i = 0; i < fragments; i++) {
2286 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2287 u32 size = frag->size;
2292 prev_tx_ctx = np->put_tx_ctx;
2293 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2294 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
2296 np->put_tx_ctx->dma_len = bcnt;
2297 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2298 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2299 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2303 if (unlikely(put_tx++ == np->last_tx.ex))
2304 put_tx = np->first_tx.ex;
2305 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2306 np->put_tx_ctx = np->first_tx_ctx;
2310 /* set last fragment flag */
2311 prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
2313 /* save skb in this slot's context area */
2314 prev_tx_ctx->skb = skb;
2316 if (skb_is_gso(skb))
2317 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2319 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2320 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2323 if (likely(!np->vlangrp)) {
2324 start_tx->txvlan = 0;
2326 if (vlan_tx_tag_present(skb))
2327 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
2329 start_tx->txvlan = 0;
2332 spin_lock_irqsave(&np->lock, flags);
2335 /* Limit the number of outstanding tx. Set up all fragments, but
2336 * do not set the VALID bit on the first descriptor. Save a pointer
2337 * to that descriptor and to the next skb_map element.
2340 if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
2341 if (!np->tx_change_owner)
2342 np->tx_change_owner = start_tx_ctx;
2344 /* remove VALID bit */
2345 tx_flags &= ~NV_TX2_VALID;
2346 start_tx_ctx->first_tx_desc = start_tx;
2347 start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
2348 np->tx_end_flip = np->put_tx_ctx;
2350 np->tx_pkts_in_progress++;
2355 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2356 np->put_tx.ex = put_tx;
2358 spin_unlock_irqrestore(&np->lock, flags);
2360 dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
2361 dev->name, entries, tx_flags_extra);
2364 for (j=0; j<64; j++) {
2366 dprintk("\n%03x:", j);
2367 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2372 dev->trans_start = jiffies;
2373 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2374 return NETDEV_TX_OK;
2377 static inline void nv_tx_flip_ownership(struct net_device *dev)
2379 struct fe_priv *np = netdev_priv(dev);
2381 np->tx_pkts_in_progress--;
2382 if (np->tx_change_owner) {
2383 np->tx_change_owner->first_tx_desc->flaglen |=
2384 cpu_to_le32(NV_TX2_VALID);
2385 np->tx_pkts_in_progress++;
2387 np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
2388 if (np->tx_change_owner == np->tx_end_flip)
2389 np->tx_change_owner = NULL;
2391 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
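/*
 * Aside (illustrative sketch, not part of the driver): the deferred
 * ownership scheme that nv_tx_flip_ownership completes. While
 * NV_TX_LIMIT_COUNT packets are in flight, nv_start_xmit_optimized
 * queues further packets with VALID cleared; each completion then hands
 * VALID to the next waiter. example_waiter and example_flip are
 * hypothetical names (the real code stores little-endian flags through
 * a first_tx_desc pointer).
 */
struct example_waiter {
	u32 *first_desc_flaglen;	/* flaglen of the packet's first desc */
	struct example_waiter *next;	/* next deferred packet, if any */
};

static inline struct example_waiter *example_flip(struct example_waiter *w,
						  u32 valid_bit)
{
	if (!w)
		return NULL;
	*w->first_desc_flaglen |= valid_bit;	/* release one packet to the nic */
	return w->next;				/* advance the wait chain */
}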
2396 * nv_tx_done: check for completed packets, release the skbs.
2398 * Caller must own np->lock.
2400 static int nv_tx_done(struct net_device *dev, int limit)
2402 struct fe_priv *np = netdev_priv(dev);
2405 struct ring_desc* orig_get_tx = np->get_tx.orig;
2407 while ((np->get_tx.orig != np->put_tx.orig) &&
2408 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
2409 (tx_work < limit)) {
2411 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
2414 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
2415 np->get_tx_ctx->dma_len,
2417 np->get_tx_ctx->dma = 0;
2419 if (np->desc_ver == DESC_VER_1) {
2420 if (flags & NV_TX_LASTPACKET) {
2421 if (flags & NV_TX_ERROR) {
2422 if (flags & NV_TX_UNDERFLOW)
2423 dev->stats.tx_fifo_errors++;
2424 if (flags & NV_TX_CARRIERLOST)
2425 dev->stats.tx_carrier_errors++;
2426 if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
2427 nv_legacybackoff_reseed(dev);
2428 dev->stats.tx_errors++;
2430 dev->stats.tx_packets++;
2431 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2433 dev_kfree_skb_any(np->get_tx_ctx->skb);
2434 np->get_tx_ctx->skb = NULL;
2438 if (flags & NV_TX2_LASTPACKET) {
2439 if (flags & NV_TX2_ERROR) {
2440 if (flags & NV_TX2_UNDERFLOW)
2441 dev->stats.tx_fifo_errors++;
2442 if (flags & NV_TX2_CARRIERLOST)
2443 dev->stats.tx_carrier_errors++;
2444 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
2445 nv_legacybackoff_reseed(dev);
2446 dev->stats.tx_errors++;
2448 dev->stats.tx_packets++;
2449 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2451 dev_kfree_skb_any(np->get_tx_ctx->skb);
2452 np->get_tx_ctx->skb = NULL;
2456 if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
2457 np->get_tx.orig = np->first_tx.orig;
2458 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2459 np->get_tx_ctx = np->first_tx_ctx;
2461 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
2463 netif_wake_queue(dev);
2468 static int nv_tx_done_optimized(struct net_device *dev, int limit)
2470 struct fe_priv *np = netdev_priv(dev);
2473 struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
2475 while ((np->get_tx.ex != np->put_tx.ex) &&
2476 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
2477 (tx_work < limit)) {
2479 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
2482 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
2483 np->get_tx_ctx->dma_len,
2485 np->get_tx_ctx->dma = 0;
2487 if (flags & NV_TX2_LASTPACKET) {
2488 if (!(flags & NV_TX2_ERROR))
2489 dev->stats.tx_packets++;
2491 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
2492 if (np->driver_data & DEV_HAS_GEAR_MODE)
2493 nv_gear_backoff_reseed(dev);
2495 nv_legacybackoff_reseed(dev);
2499 dev_kfree_skb_any(np->get_tx_ctx->skb);
2500 np->get_tx_ctx->skb = NULL;
2504 nv_tx_flip_ownership(dev);
2507 if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2508 np->get_tx.ex = np->first_tx.ex;
2509 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2510 np->get_tx_ctx = np->first_tx_ctx;
2512 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
2514 netif_wake_queue(dev);
2520 * nv_tx_timeout: dev->tx_timeout function
2521 * Called with netif_tx_lock held.
2523 static void nv_tx_timeout(struct net_device *dev)
2525 struct fe_priv *np = netdev_priv(dev);
2526 u8 __iomem *base = get_hwbase(dev);
2529 if (np->msi_flags & NV_MSI_X_ENABLED)
2530 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2532 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2534 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);
2539 printk(KERN_INFO "%s: Ring at %lx\n",
2540 dev->name, (unsigned long)np->ring_addr);
2541 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
2542 for (i=0;i<=np->register_size;i+= 32) {
2543 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
2545 readl(base + i + 0), readl(base + i + 4),
2546 readl(base + i + 8), readl(base + i + 12),
2547 readl(base + i + 16), readl(base + i + 20),
2548 readl(base + i + 24), readl(base + i + 28));
2550 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
2551 for (i=0;i<np->tx_ring_size;i+= 4) {
2552 if (!nv_optimized(np)) {
2553 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
2555 le32_to_cpu(np->tx_ring.orig[i].buf),
2556 le32_to_cpu(np->tx_ring.orig[i].flaglen),
2557 le32_to_cpu(np->tx_ring.orig[i+1].buf),
2558 le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
2559 le32_to_cpu(np->tx_ring.orig[i+2].buf),
2560 le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
2561 le32_to_cpu(np->tx_ring.orig[i+3].buf),
2562 le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
2564 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
2566 le32_to_cpu(np->tx_ring.ex[i].bufhigh),
2567 le32_to_cpu(np->tx_ring.ex[i].buflow),
2568 le32_to_cpu(np->tx_ring.ex[i].flaglen),
2569 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
2570 le32_to_cpu(np->tx_ring.ex[i+1].buflow),
2571 le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
2572 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
2573 le32_to_cpu(np->tx_ring.ex[i+2].buflow),
2574 le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
2575 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
2576 le32_to_cpu(np->tx_ring.ex[i+3].buflow),
2577 le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2582 spin_lock_irq(&np->lock);
2584 /* 1) stop tx engine */
2587 /* 2) check that the packets were not sent already: */
2588 if (!nv_optimized(np))
2589 nv_tx_done(dev, np->tx_ring_size);
2591 nv_tx_done_optimized(dev, np->tx_ring_size);
2593 /* 3) if there are dead entries: clear everything */
2594 if (np->get_tx_ctx != np->put_tx_ctx) {
2595 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
2598 setup_hw_rings(dev, NV_SETUP_TX_RING);
2601 netif_wake_queue(dev);
2603 /* 4) restart tx engine */
2605 spin_unlock_irq(&np->lock);
2609 * Called when the nic notices a mismatch between the actual data len on the
2610 * wire and the len indicated in the 802 header
2612 static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2614 int hdrlen; /* length of the 802 header */
2615 int protolen; /* length as stored in the proto field */
2617 /* 1) calculate len according to header */
2618 if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
2619 protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
2622 protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
2625 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
2626 dev->name, datalen, protolen, hdrlen);
2627 if (protolen > ETH_DATA_LEN)
2628 return datalen; /* Value in proto field not a len, no checks possible */
2631 /* consistency checks: */
2632 if (datalen > ETH_ZLEN) {
2633 if (datalen >= protolen) {
2634 /* more data on wire than in 802 header, trim off
2637 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2638 dev->name, protolen);
2641 /* less data on wire than mentioned in header.
2642 * Discard the packet.
2644 dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
2649 /* short packet. Accept only if 802 values are also short */
2650 if (protolen > ETH_ZLEN) {
2651 dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
2655 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2656 dev->name, datalen);
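/*
 * Aside (illustrative sketch, not part of the driver): nv_getlen's
 * accept/trim/discard decision as a pure function. example_getlen is a
 * hypothetical name, and the sketch ignores the header-length adjustment
 * the real function applies to protolen. Returns the accepted length or
 * -1 for a discard.
 */
static inline int example_getlen(int datalen, int protolen,
				 int eth_zlen, int eth_data_len)
{
	if (protolen > eth_data_len)
		return datalen;		/* proto field is a type, not a length */
	if (datalen > eth_zlen)		/* long frame: trim or discard */
		return datalen >= protolen ? protolen : -1;
	return protolen <= eth_zlen ? datalen : -1;	/* short frame */
}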
2661 static int nv_rx_process(struct net_device *dev, int limit)
2663 struct fe_priv *np = netdev_priv(dev);
2666 struct sk_buff *skb;
2669 while((np->get_rx.orig != np->put_rx.orig) &&
2670 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2671 (rx_work < limit)) {
2673 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
2677 * the packet is for us - immediately tear down the pci mapping.
2678 * TODO: check if a prefetch of the first cacheline improves performance.
2681 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2682 np->get_rx_ctx->dma_len,
2683 PCI_DMA_FROMDEVICE);
2684 skb = np->get_rx_ctx->skb;
2685 np->get_rx_ctx->skb = NULL;
2689 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
2690 for (j=0; j<64; j++) {
2692 dprintk("\n%03x:", j);
2693 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2697 /* look at what we actually got: */
2698 if (np->desc_ver == DESC_VER_1) {
2699 if (likely(flags & NV_RX_DESCRIPTORVALID)) {
2700 len = flags & LEN_MASK_V1;
2701 if (unlikely(flags & NV_RX_ERROR)) {
2702 if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
2703 len = nv_getlen(dev, skb->data, len);
2705 dev->stats.rx_errors++;
2710 /* framing errors are soft errors */
2711 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
2712 if (flags & NV_RX_SUBSTRACT1) {
2716 /* the rest are hard errors */
2718 if (flags & NV_RX_MISSEDFRAME)
2719 dev->stats.rx_missed_errors++;
2720 if (flags & NV_RX_CRCERR)
2721 dev->stats.rx_crc_errors++;
2722 if (flags & NV_RX_OVERFLOW)
2723 dev->stats.rx_over_errors++;
2724 dev->stats.rx_errors++;
2734 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2735 len = flags & LEN_MASK_V2;
2736 if (unlikely(flags & NV_RX2_ERROR)) {
2737 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2738 len = nv_getlen(dev, skb->data, len);
2740 dev->stats.rx_errors++;
2745 /* framing errors are soft errors */
2746 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2747 if (flags & NV_RX2_SUBSTRACT1) {
2751 /* the rest are hard errors */
2753 if (flags & NV_RX2_CRCERR)
2754 dev->stats.rx_crc_errors++;
2755 if (flags & NV_RX2_OVERFLOW)
2756 dev->stats.rx_over_errors++;
2757 dev->stats.rx_errors++;
2762 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
2763 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */
2764 skb->ip_summed = CHECKSUM_UNNECESSARY;
2770 /* got a valid packet - forward it to the network core */
2772 skb->protocol = eth_type_trans(skb, dev);
2773 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
2774 dev->name, len, skb->protocol);
2775 #ifdef CONFIG_FORCEDETH_NAPI
2776 netif_receive_skb(skb);
2780 dev->stats.rx_packets++;
2781 dev->stats.rx_bytes += len;
2783 if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2784 np->get_rx.orig = np->first_rx.orig;
2785 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2786 np->get_rx_ctx = np->first_rx_ctx;
2794 static int nv_rx_process_optimized(struct net_device *dev, int limit)
2796 struct fe_priv *np = netdev_priv(dev);
2800 struct sk_buff *skb;
2803 while((np->get_rx.ex != np->put_rx.ex) &&
2804 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
2805 (rx_work < limit)) {
2807 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
2811 * the packet is for us - immediately tear down the pci mapping.
2812 * TODO: check if a prefetch of the first cacheline improves performance.
2815 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2816 np->get_rx_ctx->dma_len,
2817 PCI_DMA_FROMDEVICE);
2818 skb = np->get_rx_ctx->skb;
2819 np->get_rx_ctx->skb = NULL;
2823 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
2824 for (j=0; j<64; j++) {
2826 dprintk("\n%03x:", j);
2827 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2831 /* look at what we actually got: */
2832 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2833 len = flags & LEN_MASK_V2;
2834 if (unlikely(flags & NV_RX2_ERROR)) {
2835 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2836 len = nv_getlen(dev, skb->data, len);
2842 /* framing errors are soft errors */
2843 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2844 if (flags & NV_RX2_SUBSTRACT1) {
2848 /* the rest are hard errors */
2855 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
2856 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */
2857 skb->ip_summed = CHECKSUM_UNNECESSARY;
2859 /* got a valid packet - forward it to the network core */
2861 skb->protocol = eth_type_trans(skb, dev);
2862 prefetch(skb->data);
2864 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
2865 dev->name, len, skb->protocol);
2867 if (likely(!np->vlangrp)) {
2868 #ifdef CONFIG_FORCEDETH_NAPI
2869 netif_receive_skb(skb);
2874 vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
2875 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
2876 #ifdef CONFIG_FORCEDETH_NAPI
2877 vlan_hwaccel_receive_skb(skb, np->vlangrp,
2878 vlanflags & NV_RX3_VLAN_TAG_MASK);
2880 vlan_hwaccel_rx(skb, np->vlangrp,
2881 vlanflags & NV_RX3_VLAN_TAG_MASK);
2884 #ifdef CONFIG_FORCEDETH_NAPI
2885 netif_receive_skb(skb);
2892 dev->stats.rx_packets++;
2893 dev->stats.rx_bytes += len;
2898 if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
2899 np->get_rx.ex = np->first_rx.ex;
2900 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2901 np->get_rx_ctx = np->first_rx_ctx;
2909 static void set_bufsize(struct net_device *dev)
2911 struct fe_priv *np = netdev_priv(dev);
2913 if (dev->mtu <= ETH_DATA_LEN)
2914 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
2916 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
2920 * nv_change_mtu: dev->change_mtu function
2921 * Called with dev_base_lock held for read.
2923 static int nv_change_mtu(struct net_device *dev, int new_mtu)
2925 struct fe_priv *np = netdev_priv(dev);
2928 if (new_mtu < 64 || new_mtu > np->pkt_limit)
2934 /* return early if the buffer sizes will not change */
2935 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
2937 if (old_mtu == new_mtu)
2940 /* synchronized against open : rtnl_lock() held by caller */
2941 if (netif_running(dev)) {
2942 u8 __iomem *base = get_hwbase(dev);
2944 * It seems that the nic preloads valid ring entries into an
2945 * internal buffer. The procedure for flushing everything is
2946 * guessed; there is probably a simpler approach.
2947 * Changing the MTU is a rare event, so it shouldn't matter.
2949 nv_disable_irq(dev);
2950 nv_napi_disable(dev);
2951 netif_tx_lock_bh(dev);
2952 netif_addr_lock(dev);
2953 spin_lock(&np->lock);
2957 /* drain rx queue */
2959 /* reinit driver view of the rx queue */
2961 if (nv_init_ring(dev)) {
2962 if (!np->in_shutdown)
2963 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2965 /* reinit nic view of the rx queue */
2966 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
2967 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
2968 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
2969 base + NvRegRingSizes);
2971 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2974 /* restart rx engine */
2976 spin_unlock(&np->lock);
2977 netif_addr_unlock(dev);
2978 netif_tx_unlock_bh(dev);
2979 nv_napi_enable(dev);
2985 static void nv_copy_mac_to_hw(struct net_device *dev)
2987 u8 __iomem *base = get_hwbase(dev);
2990 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
2991 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
2992 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
2994 writel(mac[0], base + NvRegMacAddrA);
2995 writel(mac[1], base + NvRegMacAddrB);
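/*
 * Aside (illustrative sketch, not part of the driver): the byte packing
 * done by nv_copy_mac_to_hw. example_pack_mac is a hypothetical name.
 * For 00:11:22:33:44:55 this yields lo = 0x33221100 and hi = 0x00005544.
 */
static inline void example_pack_mac(const u8 *a, u32 *lo, u32 *hi)
{
	*lo = a[0] | (a[1] << 8) | (a[2] << 16) | ((u32)a[3] << 24);
	*hi = a[4] | (a[5] << 8);
}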
2999 * nv_set_mac_address: dev->set_mac_address function
3000 * Called with rtnl_lock() held.
3002 static int nv_set_mac_address(struct net_device *dev, void *addr)
3004 struct fe_priv *np = netdev_priv(dev);
3005 struct sockaddr *macaddr = (struct sockaddr*)addr;
3007 if (!is_valid_ether_addr(macaddr->sa_data))
3008 return -EADDRNOTAVAIL;
3010 /* synchronized against open : rtnl_lock() held by caller */
3011 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
3013 if (netif_running(dev)) {
3014 netif_tx_lock_bh(dev);
3015 netif_addr_lock(dev);
3016 spin_lock_irq(&np->lock);
3018 /* stop rx engine */
3021 /* set mac address */
3022 nv_copy_mac_to_hw(dev);
3024 /* restart rx engine */
3026 spin_unlock_irq(&np->lock);
3027 netif_addr_unlock(dev);
3028 netif_tx_unlock_bh(dev);
3030 nv_copy_mac_to_hw(dev);
3036 * nv_set_multicast: dev->set_multicast function
3037 * Called with netif_tx_lock held.
3039 static void nv_set_multicast(struct net_device *dev)
3041 struct fe_priv *np = netdev_priv(dev);
3042 u8 __iomem *base = get_hwbase(dev);
3045 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
3047 memset(addr, 0, sizeof(addr));
3048 memset(mask, 0, sizeof(mask));
3050 if (dev->flags & IFF_PROMISC) {
3051 pff |= NVREG_PFF_PROMISC;
3053 pff |= NVREG_PFF_MYADDR;
3055 if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
3059 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
3060 if (dev->flags & IFF_ALLMULTI) {
3061 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
3063 struct dev_mc_list *walk;
3065 walk = dev->mc_list;
3066 while (walk != NULL) {
3068 a = le32_to_cpu(*(__le32 *) walk->dmi_addr);
3069 b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4]));
3077 addr[0] = alwaysOn[0];
3078 addr[1] = alwaysOn[1];
3079 mask[0] = alwaysOn[0] | alwaysOff[0];
3080 mask[1] = alwaysOn[1] | alwaysOff[1];
3082 mask[0] = NVREG_MCASTMASKA_NONE;
3083 mask[1] = NVREG_MCASTMASKB_NONE;
3086 addr[0] |= NVREG_MCASTADDRA_FORCE;
3087 pff |= NVREG_PFF_ALWAYS;
3088 spin_lock_irq(&np->lock);
3090 writel(addr[0], base + NvRegMulticastAddrA);
3091 writel(addr[1], base + NvRegMulticastAddrB);
3092 writel(mask[0], base + NvRegMulticastMaskA);
3093 writel(mask[1], base + NvRegMulticastMaskB);
3094 writel(pff, base + NvRegPacketFilterFlags);
3095 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
3098 spin_unlock_irq(&np->lock);
3101 static void nv_update_pause(struct net_device *dev, u32 pause_flags)
3103 struct fe_priv *np = netdev_priv(dev);
3104 u8 __iomem *base = get_hwbase(dev);
3106 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
3108 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
3109 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
3110 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
3111 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
3112 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3114 writel(pff, base + NvRegPacketFilterFlags);
3117 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
3118 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
3119 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
3120 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
3121 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
3122 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
3123 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
3124 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
3125 /* limit the number of tx pause frames to a default of 8 */
3126 writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
3128 writel(pause_enable, base + NvRegTxPauseFrame);
3129 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
3130 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3132 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
3133 writel(regmisc, base + NvRegMisc1);
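/*
 * Aside (illustrative sketch, not part of the driver): the pause_flags
 * handed to nv_update_pause are resolved from autonegotiation in
 * nv_update_linkspeed below. Restated as a pure function with
 * hypothetical names; EX_CAP and EX_ASYM stand for the symmetric and
 * asymmetric pause ability bits of the ADVERTISE_PAUSE and LPA_PAUSE
 * pairs.
 */
#define EX_CAP	0x1
#define EX_ASYM	0x2

static inline void example_resolve_pause(u32 adv, u32 lpa, int tx_requested,
					 int *rx_en, int *tx_en)
{
	*rx_en = *tx_en = 0;
	if (adv == EX_CAP && (lpa & EX_CAP)) {
		*rx_en = 1;			/* symmetric pause agreed */
		*tx_en = tx_requested;
	} else if (adv == EX_ASYM && lpa == (EX_CAP | EX_ASYM)) {
		*tx_en = 1;			/* we may pause the partner */
	} else if (adv == (EX_CAP | EX_ASYM)) {
		if (lpa & EX_CAP) {
			*rx_en = 1;
			*tx_en = tx_requested;
		} else if (lpa == EX_ASYM) {
			*rx_en = 1;		/* the partner pauses us only */
		}
	}
}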
3139 * nv_update_linkspeed: Setup the MAC according to the link partner
3140 * @dev: Network device to be configured
3142 * The function queries the PHY and checks if there is a link partner.
3143 * If so, it sets up the MAC accordingly. Otherwise, the MAC is
3144 * set to 10 MBit HD.
3146 * The function returns 0 if there is no link partner and 1 if there is
3147 * a good link partner.
3149 static int nv_update_linkspeed(struct net_device *dev)
3151 struct fe_priv *np = netdev_priv(dev);
3152 u8 __iomem *base = get_hwbase(dev);
3155 int adv_lpa, adv_pause, lpa_pause;
3156 int newls = np->linkspeed;
3157 int newdup = np->duplex;
3160 u32 control_1000, status_1000, phyreg, pause_flags, txreg;
3164 /* BMSR_LSTATUS is latched, read it twice:
3165 * we want the current value.
3167 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3168 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3170 if (!(mii_status & BMSR_LSTATUS)) {
3171 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
3173 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3179 if (np->autoneg == 0) {
3180 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
3181 dev->name, np->fixed_mode);
3182 if (np->fixed_mode & LPA_100FULL) {
3183 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3185 } else if (np->fixed_mode & LPA_100HALF) {
3186 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3188 } else if (np->fixed_mode & LPA_10FULL) {
3189 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3192 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3198 /* check that autonegotiation is complete */
3199 if (!(mii_status & BMSR_ANEGCOMPLETE)) {
3200 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */
3201 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3204 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
3208 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3209 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
3210 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
3211 dev->name, adv, lpa);
3214 if (np->gigabit == PHY_GIGABIT) {
3215 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3216 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
3218 if ((control_1000 & ADVERTISE_1000FULL) &&
3219 (status_1000 & LPA_1000FULL)) {
3220 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
3222 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
3228 /* FIXME: handle parallel detection properly */
3229 adv_lpa = lpa & adv;
3230 if (adv_lpa & LPA_100FULL) {
3231 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3233 } else if (adv_lpa & LPA_100HALF) {
3234 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3236 } else if (adv_lpa & LPA_10FULL) {
3237 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3239 } else if (adv_lpa & LPA_10HALF) {
3240 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3243 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
3244 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3249 if (np->duplex == newdup && np->linkspeed == newls)
3252 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
3253 dev->name, np->linkspeed, np->duplex, newls, newdup);
3255 np->duplex = newdup;
3256 np->linkspeed = newls;
3258 /* The transmitter and receiver must be restarted for a safe update */
3259 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
3260 txrxFlags |= NV_RESTART_TX;
3263 if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
3264 txrxFlags |= NV_RESTART_RX;
3268 if (np->gigabit == PHY_GIGABIT) {
3269 phyreg = readl(base + NvRegSlotTime);
3270 phyreg &= ~(0x3FF00);
3271 if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
3272 ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
3273 phyreg |= NVREG_SLOTTIME_10_100_FULL;
3274 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
3275 phyreg |= NVREG_SLOTTIME_1000_FULL;
3276 writel(phyreg, base + NvRegSlotTime);
3279 phyreg = readl(base + NvRegPhyInterface);
3280 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
3281 if (np->duplex == 0)
3283 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
3285 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3287 writel(phyreg, base + NvRegPhyInterface);
3289 phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
3290 if (phyreg & PHY_RGMII) {
3291 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
3292 txreg = NVREG_TX_DEFERRAL_RGMII_1000;
3294 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
3295 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
3296 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
3298 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
3300 txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
3304 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
3305 txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
3307 txreg = NVREG_TX_DEFERRAL_DEFAULT;
3309 writel(txreg, base + NvRegTxDeferral);
3311 if (np->desc_ver == DESC_VER_1) {
3312 txreg = NVREG_TX_WM_DESC1_DEFAULT;
3314 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3315 txreg = NVREG_TX_WM_DESC2_3_1000;
3317 txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
3319 writel(txreg, base + NvRegTxWatermark);
3321 writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
3324 writel(np->linkspeed, base + NvRegLinkSpeed);
3328 /* setup pause frame */
3329 if (np->duplex != 0) {
3330 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
3331 adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
3332 lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
3334 switch (adv_pause) {
3335 case ADVERTISE_PAUSE_CAP:
3336 if (lpa_pause & LPA_PAUSE_CAP) {
3337 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3338 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3339 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3342 case ADVERTISE_PAUSE_ASYM:
3343 if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
3345 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3348 case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
3349 if (lpa_pause & LPA_PAUSE_CAP)
3351 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3352 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3353 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3355 if (lpa_pause == LPA_PAUSE_ASYM)
3357 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3362 pause_flags = np->pause_flags;
3365 nv_update_pause(dev, pause_flags);
3367 if (txrxFlags & NV_RESTART_TX)
3369 if (txrxFlags & NV_RESTART_RX)
3375 static void nv_linkchange(struct net_device *dev)
3377 if (nv_update_linkspeed(dev)) {
3378 if (!netif_carrier_ok(dev)) {
3379 netif_carrier_on(dev);
3380 printk(KERN_INFO "%s: link up.\n", dev->name);
3384 if (netif_carrier_ok(dev)) {
3385 netif_carrier_off(dev);
3386 printk(KERN_INFO "%s: link down.\n", dev->name);
3392 static void nv_link_irq(struct net_device *dev)
3394 u8 __iomem *base = get_hwbase(dev);
3397 miistat = readl(base + NvRegMIIStatus);
3398 writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
3399 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
3401 if (miistat & (NVREG_MIISTAT_LINKCHANGE))
3403 dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
3406 static void nv_msi_workaround(struct fe_priv *np)
3409 /* Need to toggle the msi irq mask within the ethernet device,
3410 * otherwise, future interrupts will not be detected.
3412 if (np->msi_flags & NV_MSI_ENABLED) {
3413 u8 __iomem *base = np->base;
3415 writel(0, base + NvRegMSIIrqMask);
3416 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3420 static irqreturn_t nv_nic_irq(int foo, void *data)
3422 struct net_device *dev = (struct net_device *) data;
3423 struct fe_priv *np = netdev_priv(dev);
3424 u8 __iomem *base = get_hwbase(dev);
3427 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
3430 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3431 np->events = readl(base + NvRegIrqStatus);
3432 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3434 np->events = readl(base + NvRegMSIXIrqStatus);
3435 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
3437 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
3438 if (!(np->events & np->irqmask))
3441 nv_msi_workaround(np);
3443 #ifdef CONFIG_FORCEDETH_NAPI
3444 spin_lock(&np->lock);
3445 napi_schedule(&np->napi);
3447 /* Disable further irqs
3448 (msix not enabled with napi) */
3449 writel(0, base + NvRegIrqMask);
3451 spin_unlock(&np->lock);
3455 spin_lock(&np->lock);
3456 nv_tx_done(dev, np->tx_ring_size);
3457 spin_unlock(&np->lock);
3459 if (nv_rx_process(dev, RX_WORK_PER_LOOP)) {
3460 if (unlikely(nv_alloc_rx(dev))) {
3461 spin_lock(&np->lock);
3462 if (!np->in_shutdown)
3463 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3464 spin_unlock(&np->lock);
3468 if (unlikely(np->events & NVREG_IRQ_LINK)) {
3469 spin_lock(&np->lock);
3471 spin_unlock(&np->lock);
3473 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3474 spin_lock(&np->lock);
3476 spin_unlock(&np->lock);
3477 np->link_timeout = jiffies + LINK_TIMEOUT;
3479 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
3480 spin_lock(&np->lock);
3481 /* disable interrupts on the nic */
3482 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3483 writel(0, base + NvRegIrqMask);
3485 writel(np->irqmask, base + NvRegIrqMask);
3488 if (!np->in_shutdown) {
3489 np->nic_poll_irq = np->irqmask;
3490 np->recover_error = 1;
3491 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3493 spin_unlock(&np->lock);
3496 if (unlikely(i > max_interrupt_work)) {
3497 spin_lock(&np->lock);
3498 /* disable interrupts on the nic */
3499 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3500 writel(0, base + NvRegIrqMask);
3502 writel(np->irqmask, base + NvRegIrqMask);
3505 if (!np->in_shutdown) {
3506 np->nic_poll_irq = np->irqmask;
3507 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3509 spin_unlock(&np->lock);
3510 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
3515 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
3517 return IRQ_RETVAL(i);
3521 * All _optimized functions help increase performance
3522 * (reduce CPU usage and increase throughput). They use descriptor version 3,
3523 * compiler directives, and fewer memory accesses.
3525 static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3527 struct net_device *dev = (struct net_device *) data;
3528 struct fe_priv *np = netdev_priv(dev);
3529 u8 __iomem *base = get_hwbase(dev);
3532 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
3535 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3536 np->events = readl(base + NvRegIrqStatus);
3537 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3539 np->events = readl(base + NvRegMSIXIrqStatus);
3540 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
3542 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
3543 if (!(np->events & np->irqmask))
3546 nv_msi_workaround(np);
3548 #ifdef CONFIG_FORCEDETH_NAPI
3549 spin_lock(&np->lock);
3550 napi_schedule(&np->napi);
3552 /* Disable further irqs
3553 (msix not enabled with napi) */
3554 writel(0, base + NvRegIrqMask);
3556 spin_unlock(&np->lock);
3560 spin_lock(&np->lock);
3561 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3562 spin_unlock(&np->lock);
3564 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3565 if (unlikely(nv_alloc_rx_optimized(dev))) {
3566 spin_lock(&np->lock);
3567 if (!np->in_shutdown)
3568 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3569 spin_unlock(&np->lock);
3573 if (unlikely(np->events & NVREG_IRQ_LINK)) {
3574 spin_lock(&np->lock);
3576 spin_unlock(&np->lock);
3578 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3579 spin_lock(&np->lock);
3581 spin_unlock(&np->lock);
3582 np->link_timeout = jiffies + LINK_TIMEOUT;
3584 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
3585 spin_lock(&np->lock);
3586 /* disable interrupts on the nic */
3587 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3588 writel(0, base + NvRegIrqMask);
3590 writel(np->irqmask, base + NvRegIrqMask);
3593 if (!np->in_shutdown) {
3594 np->nic_poll_irq = np->irqmask;
3595 np->recover_error = 1;
3596 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3598 spin_unlock(&np->lock);
3602 if (unlikely(i > max_interrupt_work)) {
3603 spin_lock(&np->lock);
3604 /* disable interrupts on the nic */
3605 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3606 writel(0, base + NvRegIrqMask);
3608 writel(np->irqmask, base + NvRegIrqMask);
3611 if (!np->in_shutdown) {
3612 np->nic_poll_irq = np->irqmask;
3613 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3615 spin_unlock(&np->lock);
3616 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
3621 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
3623 return IRQ_RETVAL(i);
3626 static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3628 struct net_device *dev = (struct net_device *) data;
3629 struct fe_priv *np = netdev_priv(dev);
3630 u8 __iomem *base = get_hwbase(dev);
3633 unsigned long flags;
3635 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
3638 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3639 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
3640 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
3641 if (!(events & np->irqmask))
3644 spin_lock_irqsave(&np->lock, flags);
3645 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3646 spin_unlock_irqrestore(&np->lock, flags);
3648 if (unlikely(i > max_interrupt_work)) {
3649 spin_lock_irqsave(&np->lock, flags);
3650 /* disable interrupts on the nic */
3651 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
3654 if (!np->in_shutdown) {
3655 np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
3656 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3658 spin_unlock_irqrestore(&np->lock, flags);
3659 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
3664 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
3666 return IRQ_RETVAL(i);
3669 #ifdef CONFIG_FORCEDETH_NAPI
3670 static int nv_napi_poll(struct napi_struct *napi, int budget)
3672 struct fe_priv *np = container_of(napi, struct fe_priv, napi);
3673 struct net_device *dev = np->dev;
3674 u8 __iomem *base = get_hwbase(dev);
3675 unsigned long flags;
3678 if (!nv_optimized(np)) {
3679 spin_lock_irqsave(&np->lock, flags);
3680 nv_tx_done(dev, np->tx_ring_size);
3681 spin_unlock_irqrestore(&np->lock, flags);
3683 pkts = nv_rx_process(dev, budget);
3684 retcode = nv_alloc_rx(dev);
3686 spin_lock_irqsave(&np->lock, flags);
3687 nv_tx_done_optimized(dev, np->tx_ring_size);
3688 spin_unlock_irqrestore(&np->lock, flags);
3690 pkts = nv_rx_process_optimized(dev, budget);
3691 retcode = nv_alloc_rx_optimized(dev);
3695 spin_lock_irqsave(&np->lock, flags);
3696 if (!np->in_shutdown)
3697 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3698 spin_unlock_irqrestore(&np->lock, flags);
3701 if (unlikely(np->events & NVREG_IRQ_LINK)) {
3702 spin_lock_irqsave(&np->lock, flags);
3704 spin_unlock_irqrestore(&np->lock, flags);
3706 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3707 spin_lock_irqsave(&np->lock, flags);
3709 spin_unlock_irqrestore(&np->lock, flags);
3710 np->link_timeout = jiffies + LINK_TIMEOUT;
3712 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
3713 spin_lock_irqsave(&np->lock, flags);
3714 if (!np->in_shutdown) {
3715 np->nic_poll_irq = np->irqmask;
3716 np->recover_error = 1;
3717 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3719 spin_unlock_irqrestore(&np->lock, flags);
3720 __napi_complete(napi);
3724 if (pkts < budget) {
3725 /* re-enable interrupts
3726 (msix not enabled in napi) */
3727 spin_lock_irqsave(&np->lock, flags);
3729 __napi_complete(napi);
3731 writel(np->irqmask, base + NvRegIrqMask);
3733 spin_unlock_irqrestore(&np->lock, flags);
3739 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3741 struct net_device *dev = (struct net_device *) data;
3742 struct fe_priv *np = netdev_priv(dev);
3743 u8 __iomem *base = get_hwbase(dev);
3746 unsigned long flags;
3748 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
3751 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3752 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3753 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
3754 if (!(events & np->irqmask))
3757 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3758 if (unlikely(nv_alloc_rx_optimized(dev))) {
3759 spin_lock_irqsave(&np->lock, flags);
3760 if (!np->in_shutdown)
3761 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3762 spin_unlock_irqrestore(&np->lock, flags);
3766 if (unlikely(i > max_interrupt_work)) {
3767 spin_lock_irqsave(&np->lock, flags);
3768 /* disable interrupts on the nic */
3769 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3772 if (!np->in_shutdown) {
3773 np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
3774 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3776 spin_unlock_irqrestore(&np->lock, flags);
3777 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
3781 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
3783 return IRQ_RETVAL(i);
3786 static irqreturn_t nv_nic_irq_other(int foo, void *data)
3788 struct net_device *dev = (struct net_device *) data;
3789 struct fe_priv *np = netdev_priv(dev);
3790 u8 __iomem *base = get_hwbase(dev);
3793 unsigned long flags;
3795 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
3798 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3799 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
3800 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3801 if (!(events & np->irqmask))
3804 /* check tx in case we reached max loop limit in tx isr */
3805 spin_lock_irqsave(&np->lock, flags);
3806 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3807 spin_unlock_irqrestore(&np->lock, flags);
3809 if (events & NVREG_IRQ_LINK) {
3810 spin_lock_irqsave(&np->lock, flags);
3812 spin_unlock_irqrestore(&np->lock, flags);
3814 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
3815 spin_lock_irqsave(&np->lock, flags);
3817 spin_unlock_irqrestore(&np->lock, flags);
3818 np->link_timeout = jiffies + LINK_TIMEOUT;
3820 if (events & NVREG_IRQ_RECOVER_ERROR) {
3821 spin_lock_irq(&np->lock);
3822 /* disable interrupts on the nic */
3823 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3826 if (!np->in_shutdown) {
3827 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3828 np->recover_error = 1;
3829 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3831 spin_unlock_irq(&np->lock);
3834 if (unlikely(i > max_interrupt_work)) {
3835 spin_lock_irqsave(&np->lock, flags);
3836 /* disable interrupts on the nic */
3837 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3840 if (!np->in_shutdown) {
3841 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3842 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3844 spin_unlock_irqrestore(&np->lock, flags);
3845 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
3850 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
3852 return IRQ_RETVAL(i);
3855 static irqreturn_t nv_nic_irq_test(int foo, void *data)
3857 struct net_device *dev = (struct net_device *) data;
3858 struct fe_priv *np = netdev_priv(dev);
3859 u8 __iomem *base = get_hwbase(dev);
3862 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
3864 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3865 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3866 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
3868 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3869 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3872 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3873 if (!(events & NVREG_IRQ_TIMER))
3874 return IRQ_RETVAL(0);
3876 nv_msi_workaround(np);
3878 spin_lock(&np->lock);
3880 spin_unlock(&np->lock);
3882 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
3884 return IRQ_RETVAL(1);
3887 static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3889 u8 __iomem *base = get_hwbase(dev);
3893 /* Each interrupt bit can be mapped to an MSI-X vector (a 4-bit field).
3894 * MSIXMap0 represents the first 8 interrupt sources and MSIXMap1
3895 * represents the remaining 8.
3897 for (i = 0; i < 8; i++) {
3898 if ((irqmask >> i) & 0x1) {
3899 msixmap |= vector << (i << 2);
3902 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
3905 for (i = 0; i < 8; i++) {
3906 if ((irqmask >> (i + 8)) & 0x1) {
3907 msixmap |= vector << (i << 2);
3910 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
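/*
 * Aside (illustrative sketch, not part of the driver): the nibble
 * packing built by each loop above. example_msix_map is a hypothetical
 * name; mapping vector 2 onto sources 0 and 3 of an 8-source group
 * yields 0x00002002.
 */
static inline u32 example_msix_map(u32 vector, u32 irqmask8)
{
	u32 map = 0;
	int i;

	for (i = 0; i < 8; i++)
		if ((irqmask8 >> i) & 0x1)
			map |= vector << (i << 2);	/* one nibble per source */
	return map;
}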
3913 static int nv_request_irq(struct net_device *dev, int intr_test)
3915 struct fe_priv *np = get_nvpriv(dev);
3916 u8 __iomem *base = get_hwbase(dev);
3919 irqreturn_t (*handler)(int foo, void *data);
3922 handler = nv_nic_irq_test;
3924 if (nv_optimized(np))
3925 handler = nv_nic_irq_optimized;
3927 handler = nv_nic_irq;
3930 if (np->msi_flags & NV_MSI_X_CAPABLE) {
3931 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
3932 np->msi_x_entry[i].entry = i;
3934 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
3935 np->msi_flags |= NV_MSI_X_ENABLED;
3936 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3937 /* Request irq for rx handling */
3938 sprintf(np->name_rx, "%s-rx", dev->name);
3939 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
3940 &nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
3941 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
3942 pci_disable_msix(np->pci_dev);
3943 np->msi_flags &= ~NV_MSI_X_ENABLED;
3946 /* Request irq for tx handling */
3947 sprintf(np->name_tx, "%s-tx", dev->name);
3948 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
3949 &nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
3950 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
3951 pci_disable_msix(np->pci_dev);
3952 np->msi_flags &= ~NV_MSI_X_ENABLED;
3955 /* Request irq for link and timer handling */
3956 sprintf(np->name_other, "%s-other", dev->name);
3957 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
3958 &nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
3959 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
3960 pci_disable_msix(np->pci_dev);
3961 np->msi_flags &= ~NV_MSI_X_ENABLED;
3964 /* map interrupts to their respective vector */
3965 writel(0, base + NvRegMSIXMap0);
3966 writel(0, base + NvRegMSIXMap1);
3967 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
3968 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
3969 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
3971 /* Request irq for all interrupts */
3972 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
3973 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3974 pci_disable_msix(np->pci_dev);
3975 np->msi_flags &= ~NV_MSI_X_ENABLED;
3979 /* map interrupts to vector 0 */
3980 writel(0, base + NvRegMSIXMap0);
3981 writel(0, base + NvRegMSIXMap1);
3985 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
3986 if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
3987 np->msi_flags |= NV_MSI_ENABLED;
3988 dev->irq = np->pci_dev->irq;
3989 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
3990 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3991 pci_disable_msi(np->pci_dev);
3992 np->msi_flags &= ~NV_MSI_ENABLED;
3993 dev->irq = np->pci_dev->irq;
3997 /* map interrupts to vector 0 */
3998 writel(0, base + NvRegMSIMap0);
3999 writel(0, base + NvRegMSIMap1);
4000 /* enable msi vector 0 */
4001 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
4005 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
4012 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
4014 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
4019 static void nv_free_irq(struct net_device *dev)
4021 struct fe_priv *np = get_nvpriv(dev);
4024 if (np->msi_flags & NV_MSI_X_ENABLED) {
4025 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
4026 free_irq(np->msi_x_entry[i].vector, dev);
4028 pci_disable_msix(np->pci_dev);
4029 np->msi_flags &= ~NV_MSI_X_ENABLED;
4031 free_irq(np->pci_dev->irq, dev);
4032 if (np->msi_flags & NV_MSI_ENABLED) {
4033 pci_disable_msi(np->pci_dev);
4034 np->msi_flags &= ~NV_MSI_ENABLED;
4039 static void nv_do_nic_poll(unsigned long data)
4041 struct net_device *dev = (struct net_device *) data;
4042 struct fe_priv *np = netdev_priv(dev);
4043 u8 __iomem *base = get_hwbase(dev);
4047 * First disable the irq(s), then
4048 * re-enable interrupts on the nic; we have to do this before calling
4049 * nv_nic_irq because that may decide to do otherwise.
4052 if (!using_multi_irqs(dev)) {
4053 if (np->msi_flags & NV_MSI_X_ENABLED)
4054 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
4056 disable_irq_lockdep(np->pci_dev->irq);
4059 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4060 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
4061 mask |= NVREG_IRQ_RX_ALL;
4063 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4064 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
4065 mask |= NVREG_IRQ_TX_ALL;
4067 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4068 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
4069 mask |= NVREG_IRQ_OTHER;
4072 /* disable_irq() implies synchronize_irq(), so no irq handler can be running now */
4074 if (np->recover_error) {
4075 np->recover_error = 0;
4076 printk(KERN_INFO "%s: MAC in recoverable error state\n", dev->name);
4077 if (netif_running(dev)) {
4078 netif_tx_lock_bh(dev);
4079 netif_addr_lock(dev);
4080 spin_lock(&np->lock);
4083 if (np->driver_data & DEV_HAS_POWER_CNTRL)
4086 /* drain rx queue */
4088 /* reinit driver view of the rx queue */
4090 if (nv_init_ring(dev)) {
4091 if (!np->in_shutdown)
4092 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4094 /* reinit nic view of the rx queue */
4095 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4096 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
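/* NvRegRingSizes packs both ring lengths, each stored as size-1,
* into a single register via the RX/TX shifts. */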
4097 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4098 base + NvRegRingSizes);
4100 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4102 /* clear interrupts */
4103 if (!(np->msi_flags & NV_MSI_X_ENABLED))
4104 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4106 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4108 /* restart rx engine */
4110 spin_unlock(&np->lock);
4111 netif_addr_unlock(dev);
4112 netif_tx_unlock_bh(dev);
4116 writel(mask, base + NvRegIrqMask);
4119 if (!using_multi_irqs(dev)) {
4120 np->nic_poll_irq = 0;
4121 if (nv_optimized(np))
4122 nv_nic_irq_optimized(0, dev);
4125 if (np->msi_flags & NV_MSI_X_ENABLED)
4126 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
4128 enable_irq_lockdep(np->pci_dev->irq);
4130 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4131 np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
4132 nv_nic_irq_rx(0, dev);
4133 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
4135 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4136 np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
4137 nv_nic_irq_tx(0, dev);
4138 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
4140 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4141 np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
4142 nv_nic_irq_other(0, dev);
4143 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
4149 #ifdef CONFIG_NET_POLL_CONTROLLER
4150 static void nv_poll_controller(struct net_device *dev)
4152 nv_do_nic_poll((unsigned long) dev);
4156 static void nv_do_stats_poll(unsigned long data)
4158 struct net_device *dev = (struct net_device *) data;
4159 struct fe_priv *np = netdev_priv(dev);
4161 nv_get_hw_stats(dev);
4163 if (!np->in_shutdown)
4164 mod_timer(&np->stats_poll,
4165 round_jiffies(jiffies + STATS_INTERVAL));
4168 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4170 struct fe_priv *np = netdev_priv(dev);
4171 strcpy(info->driver, DRV_NAME);
4172 strcpy(info->version, FORCEDETH_VERSION);
4173 strcpy(info->bus_info, pci_name(np->pci_dev));
4176 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4178 struct fe_priv *np = netdev_priv(dev);
4179 wolinfo->supported = WAKE_MAGIC;
4181 spin_lock_irq(&np->lock);
4183 wolinfo->wolopts = WAKE_MAGIC;
4184 spin_unlock_irq(&np->lock);
4187 static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4189 struct fe_priv *np = netdev_priv(dev);
4190 u8 __iomem *base = get_hwbase(dev);
4193 if (wolinfo->wolopts == 0) {
4195 } else if (wolinfo->wolopts & WAKE_MAGIC) {
4197 flags = NVREG_WAKEUPFLAGS_ENABLE;
4199 if (netif_running(dev)) {
4200 spin_lock_irq(&np->lock);
4201 writel(flags, base + NvRegWakeUpFlags);
4202 spin_unlock_irq(&np->lock);
4207 static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4209 struct fe_priv *np = netdev_priv(dev);
4212 spin_lock_irq(&np->lock);
4213 ecmd->port = PORT_MII;
4214 if (!netif_running(dev)) {
4215 /* We do not track link speed / duplex setting if the
4216 * interface is disabled. Force a link check */
4217 if (nv_update_linkspeed(dev)) {
4218 if (!netif_carrier_ok(dev))
4219 netif_carrier_on(dev);
4221 if (netif_carrier_ok(dev))
4222 netif_carrier_off(dev);
4226 if (netif_carrier_ok(dev)) {
4227 switch (np->linkspeed & NVREG_LINKSPEED_MASK) {
4228 case NVREG_LINKSPEED_10:
4229 ecmd->speed = SPEED_10;
4231 case NVREG_LINKSPEED_100:
4232 ecmd->speed = SPEED_100;
4234 case NVREG_LINKSPEED_1000:
4235 ecmd->speed = SPEED_1000;
4238 ecmd->duplex = DUPLEX_HALF;
4240 ecmd->duplex = DUPLEX_FULL;
4246 ecmd->autoneg = np->autoneg;
4248 ecmd->advertising = ADVERTISED_MII;
4250 ecmd->advertising |= ADVERTISED_Autoneg;
4251 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4252 if (adv & ADVERTISE_10HALF)
4253 ecmd->advertising |= ADVERTISED_10baseT_Half;
4254 if (adv & ADVERTISE_10FULL)
4255 ecmd->advertising |= ADVERTISED_10baseT_Full;
4256 if (adv & ADVERTISE_100HALF)
4257 ecmd->advertising |= ADVERTISED_100baseT_Half;
4258 if (adv & ADVERTISE_100FULL)
4259 ecmd->advertising |= ADVERTISED_100baseT_Full;
4260 if (np->gigabit == PHY_GIGABIT) {
4261 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4262 if (adv & ADVERTISE_1000FULL)
4263 ecmd->advertising |= ADVERTISED_1000baseT_Full;
4266 ecmd->supported = (SUPPORTED_Autoneg |
4267 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
4268 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
4270 if (np->gigabit == PHY_GIGABIT)
4271 ecmd->supported |= SUPPORTED_1000baseT_Full;
4273 ecmd->phy_address = np->phyaddr;
4274 ecmd->transceiver = XCVR_EXTERNAL;
4276 /* ignore maxtxpkt, maxrxpkt for now */
4277 spin_unlock_irq(&np->lock);
4281 static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4283 struct fe_priv *np = netdev_priv(dev);
4285 if (ecmd->port != PORT_MII)
4287 if (ecmd->transceiver != XCVR_EXTERNAL)
4289 if (ecmd->phy_address != np->phyaddr) {
4290 /* TODO: support switching between multiple phys. Should be
4291 * trivial, but not enabled due to lack of test hardware. */
4294 if (ecmd->autoneg == AUTONEG_ENABLE) {
4297 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
4298 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
4299 if (np->gigabit == PHY_GIGABIT)
4300 mask |= ADVERTISED_1000baseT_Full;
4302 if ((ecmd->advertising & mask) == 0)
4305 } else if (ecmd->autoneg == AUTONEG_DISABLE) {
4306 /* Note: disabling autonegotiation at speed 1000 is intentionally
4307 * forbidden - no one should need that. */
4309 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
4311 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
4317 netif_carrier_off(dev);
4318 if (netif_running(dev)) {
4319 unsigned long flags;
4321 nv_disable_irq(dev);
4322 netif_tx_lock_bh(dev);
4323 netif_addr_lock(dev);
4324 /* with plain spinlock lockdep complains */
4325 spin_lock_irqsave(&np->lock, flags);
4328 * this can take some time, and interrupts are disabled
4329 * due to spin_lock_irqsave, but let's hope no daemon
4330 * is going to change the settings very often...
4332 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
4333 * + some minor delays, which is up to a second approximately
4336 spin_unlock_irqrestore(&np->lock, flags);
4337 netif_addr_unlock(dev);
4338 netif_tx_unlock_bh(dev);
4341 if (ecmd->autoneg == AUTONEG_ENABLE) {
4346 /* advertise only what has been requested */
4347 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4348 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4349 if (ecmd->advertising & ADVERTISED_10baseT_Half)
4350 adv |= ADVERTISE_10HALF;
4351 if (ecmd->advertising & ADVERTISED_10baseT_Full)
4352 adv |= ADVERTISE_10FULL;
4353 if (ecmd->advertising & ADVERTISED_100baseT_Half)
4354 adv |= ADVERTISE_100HALF;
4355 if (ecmd->advertising & ADVERTISED_100baseT_Full)
4356 adv |= ADVERTISE_100FULL;
4357 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4358 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4359 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4360 adv |= ADVERTISE_PAUSE_ASYM;
4361 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4363 if (np->gigabit == PHY_GIGABIT) {
4364 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4365 adv &= ~ADVERTISE_1000FULL;
4366 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
4367 adv |= ADVERTISE_1000FULL;
4368 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4371 if (netif_running(dev))
4372 printk(KERN_INFO "%s: link down.\n", dev->name);
4373 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4374 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4375 bmcr |= BMCR_ANENABLE;
4376 /* reset the phy in order for settings to stick,
4377 * and cause autoneg to start */
4378 if (phy_reset(dev, bmcr)) {
4379 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
4383 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4384 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4391 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4392 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4393 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
4394 adv |= ADVERTISE_10HALF;
4395 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
4396 adv |= ADVERTISE_10FULL;
4397 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
4398 adv |= ADVERTISE_100HALF;
4399 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
4400 adv |= ADVERTISE_100FULL;
4401 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4402 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
4403 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4404 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4406 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
4407 adv |= ADVERTISE_PAUSE_ASYM;
4408 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4410 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4411 np->fixed_mode = adv;
4413 if (np->gigabit == PHY_GIGABIT) {
4414 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4415 adv &= ~ADVERTISE_1000FULL;
4416 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4419 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4420 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
4421 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
4422 bmcr |= BMCR_FULLDPLX;
4423 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
4424 bmcr |= BMCR_SPEED100;
4425 if (np->phy_oui == PHY_OUI_MARVELL) {
4426 /* reset the phy in order for forced mode settings to stick */
4427 if (phy_reset(dev, bmcr)) {
4428 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
4432 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4433 if (netif_running(dev)) {
4434 /* Wait a bit and then reconfigure the nic. */
4441 if (netif_running(dev)) {
4449 #define FORCEDETH_REGS_VER 1
4451 static int nv_get_regs_len(struct net_device *dev)
4453 struct fe_priv *np = netdev_priv(dev);
4454 return np->register_size;
4457 static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
4459 struct fe_priv *np = netdev_priv(dev);
4460 u8 __iomem *base = get_hwbase(dev);
4464 regs->version = FORCEDETH_REGS_VER;
4465 spin_lock_irq(&np->lock);
4466 for (i = 0; i <= np->register_size/sizeof(u32); i++)
4467 rbuf[i] = readl(base + i*sizeof(u32));
4468 spin_unlock_irq(&np->lock);
4471 static int nv_nway_reset(struct net_device *dev)
4473 struct fe_priv *np = netdev_priv(dev);
4479 netif_carrier_off(dev);
4480 if (netif_running(dev)) {
4481 nv_disable_irq(dev);
4482 netif_tx_lock_bh(dev);
4483 netif_addr_lock(dev);
4484 spin_lock(&np->lock);
4487 spin_unlock(&np->lock);
4488 netif_addr_unlock(dev);
4489 netif_tx_unlock_bh(dev);
4490 printk(KERN_INFO "%s: link down.\n", dev->name);
4493 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4494 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4495 bmcr |= BMCR_ANENABLE;
4496 /* reset the phy in order for settings to stick*/
4497 if (phy_reset(dev, bmcr)) {
4498 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
4502 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4503 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4506 if (netif_running(dev)) {
4518 static int nv_set_tso(struct net_device *dev, u32 value)
4520 struct fe_priv *np = netdev_priv(dev);
4522 if ((np->driver_data & DEV_HAS_CHECKSUM))
4523 return ethtool_op_set_tso(dev, value);
4528 static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
4530 struct fe_priv *np = netdev_priv(dev);
4532 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4533 ring->rx_mini_max_pending = 0;
4534 ring->rx_jumbo_max_pending = 0;
4535 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4537 ring->rx_pending = np->rx_ring_size;
4538 ring->rx_mini_pending = 0;
4539 ring->rx_jumbo_pending = 0;
4540 ring->tx_pending = np->tx_ring_size;
4543 static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
4545 struct fe_priv *np = netdev_priv(dev);
4546 u8 __iomem *base = get_hwbase(dev);
4547 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
4548 dma_addr_t ring_addr;
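/* Resize strategy: allocate the replacement rings first; only when all
* allocations succeed is the device quiesced, the new rings swapped in
* and the engines restarted. On allocation failure the old rings are
* left untouched. */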
4550 if (ring->rx_pending < RX_RING_MIN ||
4551 ring->tx_pending < TX_RING_MIN ||
4552 ring->rx_mini_pending != 0 ||
4553 ring->rx_jumbo_pending != 0 ||
4554 (np->desc_ver == DESC_VER_1 &&
4555 (ring->rx_pending > RING_MAX_DESC_VER_1 ||
4556 ring->tx_pending > RING_MAX_DESC_VER_1)) ||
4557 (np->desc_ver != DESC_VER_1 &&
4558 (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
4559 ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
4563 /* allocate new rings */
4564 if (!nv_optimized(np)) {
4565 rxtx_ring = pci_alloc_consistent(np->pci_dev,
4566 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4569 rxtx_ring = pci_alloc_consistent(np->pci_dev,
4570 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4573 rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
4574 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
4575 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
4576 /* fall back to old rings */
4577 if (!nv_optimized(np)) {
4579 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4580 rxtx_ring, ring_addr);
4583 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4584 rxtx_ring, ring_addr);
4593 if (netif_running(dev)) {
4594 nv_disable_irq(dev);
4595 nv_napi_disable(dev);
4596 netif_tx_lock_bh(dev);
4597 netif_addr_lock(dev);
4598 spin_lock(&np->lock);
4608 /* set new values */
4609 np->rx_ring_size = ring->rx_pending;
4610 np->tx_ring_size = ring->tx_pending;
4612 if (!nv_optimized(np)) {
4613 np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
4614 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4616 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
4617 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4619 np->rx_skb = (struct nv_skb_map*)rx_skbuff;
4620 np->tx_skb = (struct nv_skb_map*)tx_skbuff;
4621 np->ring_addr = ring_addr;
4623 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4624 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
4626 if (netif_running(dev)) {
4627 /* reinit driver view of the queues */
4629 if (nv_init_ring(dev)) {
4630 if (!np->in_shutdown)
4631 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4634 /* reinit nic view of the queues */
4635 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4636 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4637 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4638 base + NvRegRingSizes);
4640 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4643 /* restart engines */
4645 spin_unlock(&np->lock);
4646 netif_addr_unlock(dev);
4647 netif_tx_unlock_bh(dev);
4648 nv_napi_enable(dev);
4656 static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4658 struct fe_priv *np = netdev_priv(dev);
4660 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
4661 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
4662 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
4665 static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4667 struct fe_priv *np = netdev_priv(dev);
4670 if ((!np->autoneg && np->duplex == 0) ||
4671 (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4672 printk(KERN_INFO "%s: cannot set pause settings when forced link is in half duplex.\n",
4676 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4677 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
4681 netif_carrier_off(dev);
4682 if (netif_running(dev)) {
4683 nv_disable_irq(dev);
4684 netif_tx_lock_bh(dev);
4685 netif_addr_lock(dev);
4686 spin_lock(&np->lock);
4689 spin_unlock(&np->lock);
4690 netif_addr_unlock(dev);
4691 netif_tx_unlock_bh(dev);
4694 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
4695 if (pause->rx_pause)
4696 np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
4697 if (pause->tx_pause)
4698 np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
4700 if (np->autoneg && pause->autoneg) {
4701 np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
4703 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4704 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4705 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4706 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4707 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4708 adv |= ADVERTISE_PAUSE_ASYM;
4709 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4711 if (netif_running(dev))
4712 printk(KERN_INFO "%s: link down.\n", dev->name);
4713 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4714 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4715 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4717 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4718 if (pause->rx_pause)
4719 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4720 if (pause->tx_pause)
4721 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4723 if (!netif_running(dev))
4724 nv_update_linkspeed(dev);
4726 nv_update_pause(dev, np->pause_flags);
4729 if (netif_running(dev)) {
4736 static u32 nv_get_rx_csum(struct net_device *dev)
4738 struct fe_priv *np = netdev_priv(dev);
4739 return (np->rx_csum) != 0;
4742 static int nv_set_rx_csum(struct net_device *dev, u32 data)
4744 struct fe_priv *np = netdev_priv(dev);
4745 u8 __iomem *base = get_hwbase(dev);
4748 if (np->driver_data & DEV_HAS_CHECKSUM) {
4751 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4754 /* vlan is dependent on rx checksum offload */
4755 if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
4756 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
4758 if (netif_running(dev)) {
4759 spin_lock_irq(&np->lock);
4760 writel(np->txrxctl_bits, base + NvRegTxRxControl);
4761 spin_unlock_irq(&np->lock);
4770 static int nv_set_tx_csum(struct net_device *dev, u32 data)
4772 struct fe_priv *np = netdev_priv(dev);
4774 if (np->driver_data & DEV_HAS_CHECKSUM)
4775 return ethtool_op_set_tx_csum(dev, data);
4780 static int nv_set_sg(struct net_device *dev, u32 data)
4782 struct fe_priv *np = netdev_priv(dev);
4784 if (np->driver_data & DEV_HAS_CHECKSUM)
4785 return ethtool_op_set_sg(dev, data);
4790 static int nv_get_sset_count(struct net_device *dev, int sset)
4792 struct fe_priv *np = netdev_priv(dev);
4796 if (np->driver_data & DEV_HAS_TEST_EXTENDED)
4797 return NV_TEST_COUNT_EXTENDED;
4799 return NV_TEST_COUNT_BASE;
4801 if (np->driver_data & DEV_HAS_STATISTICS_V3)
4802 return NV_DEV_STATISTICS_V3_COUNT;
4803 else if (np->driver_data & DEV_HAS_STATISTICS_V2)
4804 return NV_DEV_STATISTICS_V2_COUNT;
4805 else if (np->driver_data & DEV_HAS_STATISTICS_V1)
4806 return NV_DEV_STATISTICS_V1_COUNT;
4814 static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
4816 struct fe_priv *np = netdev_priv(dev);
4819 nv_do_stats_poll((unsigned long)dev);
4821 memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
4824 static int nv_link_test(struct net_device *dev)
4826 struct fe_priv *np = netdev_priv(dev);
4829 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4830 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4832 /* check phy link status */
4833 if (!(mii_status & BMSR_LSTATUS))
4839 static int nv_register_test(struct net_device *dev)
4841 u8 __iomem *base = get_hwbase(dev);
4843 u32 orig_read, new_read;
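/* Walk the nv_registers_test table: XOR each register with its mask to
* toggle the testable bits, verify the toggled value reads back, then
* restore the original contents. */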
4846 orig_read = readl(base + nv_registers_test[i].reg);
4848 /* xor with mask to toggle bits */
4849 orig_read ^= nv_registers_test[i].mask;
4851 writel(orig_read, base + nv_registers_test[i].reg);
4853 new_read = readl(base + nv_registers_test[i].reg);
4855 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
4858 /* restore original value */
4859 orig_read ^= nv_registers_test[i].mask;
4860 writel(orig_read, base + nv_registers_test[i].reg);
4862 } while (nv_registers_test[++i].reg != 0);
4867 static int nv_interrupt_test(struct net_device *dev)
4869 struct fe_priv *np = netdev_priv(dev);
4870 u8 __iomem *base = get_hwbase(dev);
4873 u32 save_msi_flags, save_poll_interval = 0;
4875 if (netif_running(dev)) {
4876 /* free current irq */
4878 save_poll_interval = readl(base+NvRegPollingInterval);
4881 /* flag to test interrupt handler */
4884 /* setup test irq */
4885 save_msi_flags = np->msi_flags;
4886 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
4887 np->msi_flags |= 0x001; /* setup 1 vector */
4888 if (nv_request_irq(dev, 1))
4891 /* setup timer interrupt */
4892 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
4893 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4895 nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4897 /* wait for at least one interrupt */
4900 spin_lock_irq(&np->lock);
4902 /* flag should be set within ISR */
4903 testcnt = np->intr_test;
4907 nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4908 if (!(np->msi_flags & NV_MSI_X_ENABLED))
4909 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4911 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4913 spin_unlock_irq(&np->lock);
4917 np->msi_flags = save_msi_flags;
4919 if (netif_running(dev)) {
4920 writel(save_poll_interval, base + NvRegPollingInterval);
4921 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4922 /* restore original irq */
4923 if (nv_request_irq(dev, 0))
4930 static int nv_loopback_test(struct net_device *dev)
4932 struct fe_priv *np = netdev_priv(dev);
4933 u8 __iomem *base = get_hwbase(dev);
4934 struct sk_buff *tx_skb, *rx_skb;
4935 dma_addr_t test_dma_addr;
4936 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
4938 int len, i, pkt_len;
4940 u32 filter_flags = 0;
4941 u32 misc1_flags = 0;
4944 if (netif_running(dev)) {
4945 nv_disable_irq(dev);
4946 filter_flags = readl(base + NvRegPacketFilterFlags);
4947 misc1_flags = readl(base + NvRegMisc1);
4952 /* reinit driver view of the rx queue */
4956 /* setup hardware for loopback */
4957 writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
4958 writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
4960 /* reinit nic view of the rx queue */
4961 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4962 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4963 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4964 base + NvRegRingSizes);
4967 /* restart rx engine */
4970 /* setup packet for tx */
4971 pkt_len = ETH_DATA_LEN;
4972 tx_skb = dev_alloc_skb(pkt_len);
4974 printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
4975 " of %s\n", dev->name);
4979 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
4980 skb_tailroom(tx_skb),
4981 PCI_DMA_FROMDEVICE);
4982 pkt_data = skb_put(tx_skb, pkt_len);
4983 for (i = 0; i < pkt_len; i++)
4984 pkt_data[i] = (u8)(i & 0xff);
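/* The payload is a rolling 0x00, 0x01, ... byte ramp; the receive path
* below compares the looped-back frame byte-for-byte against it. */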
4986 if (!nv_optimized(np)) {
4987 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
4988 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
4990 np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
4991 np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
4992 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
4994 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4995 pci_push(get_hwbase(dev));
4999 /* check for rx of the packet */
5000 if (!nv_optimized(np)) {
5001 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
5002 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
5005 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
5006 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
5009 if (flags & NV_RX_AVAIL) {
5011 } else if (np->desc_ver == DESC_VER_1) {
5012 if (flags & NV_RX_ERROR)
5015 if (flags & NV_RX2_ERROR) {
5021 if (len != pkt_len) {
5023 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
5024 dev->name, len, pkt_len);
5026 rx_skb = np->rx_skb[0].skb;
5027 for (i = 0; i < pkt_len; i++) {
5028 if (rx_skb->data[i] != (u8)(i & 0xff)) {
5030 dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
5037 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
5040 pci_unmap_page(np->pci_dev, test_dma_addr,
5041 (skb_end_pointer(tx_skb) - tx_skb->data),
5043 dev_kfree_skb_any(tx_skb);
5048 /* drain rx queue */
5051 if (netif_running(dev)) {
5052 writel(misc1_flags, base + NvRegMisc1);
5053 writel(filter_flags, base + NvRegPacketFilterFlags);
5060 static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
5062 struct fe_priv *np = netdev_priv(dev);
5063 u8 __iomem *base = get_hwbase(dev);
5065 memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));
5067 if (!nv_link_test(dev)) {
5068 test->flags |= ETH_TEST_FL_FAILED;
5072 if (test->flags & ETH_TEST_FL_OFFLINE) {
5073 if (netif_running(dev)) {
5074 netif_stop_queue(dev);
5075 nv_napi_disable(dev);
5076 netif_tx_lock_bh(dev);
5077 netif_addr_lock(dev);
5078 spin_lock_irq(&np->lock);
5079 nv_disable_hw_interrupts(dev, np->irqmask);
5080 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
5081 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5083 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
5088 /* drain rx queue */
5090 spin_unlock_irq(&np->lock);
5091 netif_addr_unlock(dev);
5092 netif_tx_unlock_bh(dev);
5095 if (!nv_register_test(dev)) {
5096 test->flags |= ETH_TEST_FL_FAILED;
5100 result = nv_interrupt_test(dev);
5102 test->flags |= ETH_TEST_FL_FAILED;
5110 if (!nv_loopback_test(dev)) {
5111 test->flags |= ETH_TEST_FL_FAILED;
5115 if (netif_running(dev)) {
5116 /* reinit driver view of the rx queue */
5118 if (nv_init_ring(dev)) {
5119 if (!np->in_shutdown)
5120 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5122 /* reinit nic view of the rx queue */
5123 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5124 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5125 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5126 base + NvRegRingSizes);
5128 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5130 /* restart rx engine */
5132 netif_start_queue(dev);
5133 nv_napi_enable(dev);
5134 nv_enable_hw_interrupts(dev, np->irqmask);
5139 static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
5141 switch (stringset) {
5143 memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
5146 memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
5151 static const struct ethtool_ops ops = {
5152 .get_drvinfo = nv_get_drvinfo,
5153 .get_link = ethtool_op_get_link,
5154 .get_wol = nv_get_wol,
5155 .set_wol = nv_set_wol,
5156 .get_settings = nv_get_settings,
5157 .set_settings = nv_set_settings,
5158 .get_regs_len = nv_get_regs_len,
5159 .get_regs = nv_get_regs,
5160 .nway_reset = nv_nway_reset,
5161 .set_tso = nv_set_tso,
5162 .get_ringparam = nv_get_ringparam,
5163 .set_ringparam = nv_set_ringparam,
5164 .get_pauseparam = nv_get_pauseparam,
5165 .set_pauseparam = nv_set_pauseparam,
5166 .get_rx_csum = nv_get_rx_csum,
5167 .set_rx_csum = nv_set_rx_csum,
5168 .set_tx_csum = nv_set_tx_csum,
5169 .set_sg = nv_set_sg,
5170 .get_strings = nv_get_strings,
5171 .get_ethtool_stats = nv_get_ethtool_stats,
5172 .get_sset_count = nv_get_sset_count,
5173 .self_test = nv_self_test,
5176 static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
5178 struct fe_priv *np = get_nvpriv(dev);
5180 spin_lock_irq(&np->lock);
5182 /* save vlan group */
5186 /* enable vlan on MAC */
5187 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
5189 /* disable vlan on MAC */
5190 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
5191 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
5194 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5196 spin_unlock_irq(&np->lock);
5199 /* The mgmt unit and driver use a semaphore to access the phy during init */
5200 static int nv_mgmt_acquire_sema(struct net_device *dev)
5202 struct fe_priv *np = netdev_priv(dev);
5203 u8 __iomem *base = get_hwbase(dev);
5205 u32 tx_ctrl, mgmt_sema;
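/* Two-stage acquire: first poll until the mgmt unit's semaphore reads
* as free, then latch the host semaphore and re-read to confirm that
* both conditions still hold. */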
5207 for (i = 0; i < 10; i++) {
5208 mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
5209 if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
5214 if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
5217 for (i = 0; i < 2; i++) {
5218 tx_ctrl = readl(base + NvRegTransmitterControl);
5219 tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
5220 writel(tx_ctrl, base + NvRegTransmitterControl);
5222 /* verify that semaphore was acquired */
5223 tx_ctrl = readl(base + NvRegTransmitterControl);
5224 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
5225 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
5236 static void nv_mgmt_release_sema(struct net_device *dev)
5238 struct fe_priv *np = netdev_priv(dev);
5239 u8 __iomem *base = get_hwbase(dev);
5242 if (np->driver_data & DEV_HAS_MGMT_UNIT) {
5243 if (np->mgmt_sema) {
5244 tx_ctrl = readl(base + NvRegTransmitterControl);
5245 tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ;
5246 writel(tx_ctrl, base + NvRegTransmitterControl);
5252 static int nv_mgmt_get_version(struct net_device *dev)
5254 struct fe_priv *np = netdev_priv(dev);
5255 u8 __iomem *base = get_hwbase(dev);
5256 u32 data_ready = readl(base + NvRegTransmitterControl);
5257 u32 data_ready2 = 0;
5258 unsigned long start;
5261 writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion);
5262 writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl);
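/* Handshake: toggling DATA_START requests the version; the mgmt unit
* answers by flipping DATA_READY, which is polled below for up to
* five seconds. */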
5264 while (time_before(jiffies, start + 5*HZ)) {
5265 data_ready2 = readl(base + NvRegTransmitterControl);
5266 if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) {
5270 schedule_timeout_uninterruptible(1);
5273 if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR))
5276 np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION;
5281 static int nv_open(struct net_device *dev)
5283 struct fe_priv *np = netdev_priv(dev);
5284 u8 __iomem *base = get_hwbase(dev);
5289 dprintk(KERN_DEBUG "nv_open: begin\n");
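/* Bring-up order: wake the phy, erase stale MAC configuration, set up
* the descriptor rings, program the MAC and phy registers, then
* request and enable interrupts. */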
5292 mii_rw(dev, np->phyaddr, MII_BMCR,
5293 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
5295 /* erase previous misconfiguration */
5296 if (np->driver_data & DEV_HAS_POWER_CNTRL)
5298 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5299 writel(0, base + NvRegMulticastAddrB);
5300 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5301 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5302 writel(0, base + NvRegPacketFilterFlags);
5304 writel(0, base + NvRegTransmitterControl);
5305 writel(0, base + NvRegReceiverControl);
5307 writel(0, base + NvRegAdapterControl);
5309 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
5310 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
5312 /* initialize descriptor rings */
5314 oom = nv_init_ring(dev);
5316 writel(0, base + NvRegLinkSpeed);
5317 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5319 writel(0, base + NvRegUnknownSetupReg6);
5321 np->in_shutdown = 0;
5324 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5325 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5326 base + NvRegRingSizes);
5328 writel(np->linkspeed, base + NvRegLinkSpeed);
5329 if (np->desc_ver == DESC_VER_1)
5330 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
5332 writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
5333 writel(np->txrxctl_bits, base + NvRegTxRxControl);
5334 writel(np->vlanctl_bits, base + NvRegVlanControl);
5336 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
5337 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
5338 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
5339 KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
5341 writel(0, base + NvRegMIIMask);
5342 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5343 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5345 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
5346 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
5347 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
5348 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5350 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
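/* Seed the slot time with random low bits, presumably so that
* colliding NICs do not back off in lockstep; the value feeds the
* legacy or gear-mode backoff setup below. */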
5352 get_random_bytes(&low, sizeof(low));
5353 low &= NVREG_SLOTTIME_MASK;
5354 if (np->desc_ver == DESC_VER_1) {
5355 writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
5357 if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
5358 /* setup legacy backoff */
5359 writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
5361 writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
5362 nv_gear_backoff_reseed(dev);
5365 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
5366 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
5367 if (poll_interval == -1) {
5368 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
5369 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
5371 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
5374 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
5375 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5376 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
5377 base + NvRegAdapterControl);
5378 writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
5379 writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
5381 writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
5383 i = readl(base + NvRegPowerState);
5384 if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
5385 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
5389 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
5391 nv_disable_hw_interrupts(dev, np->irqmask);
5393 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5394 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5397 if (nv_request_irq(dev, 0)) {
5401 /* ask for interrupts */
5402 nv_enable_hw_interrupts(dev, np->irqmask);
5404 spin_lock_irq(&np->lock);
5405 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5406 writel(0, base + NvRegMulticastAddrB);
5407 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5408 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5409 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5410 /* One manual link speed update: Interrupts are enabled, future link
5411 * speed changes cause interrupts and are handled by nv_link_irq().
5415 miistat = readl(base + NvRegMIIStatus);
5416 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5417 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
5419 /* set linkspeed to an invalid value, thus forcing nv_update_linkspeed
5422 ret = nv_update_linkspeed(dev);
5424 netif_start_queue(dev);
5425 nv_napi_enable(dev);
5428 netif_carrier_on(dev);
5430 printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
5431 netif_carrier_off(dev);
5434 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5436 /* start statistics timer */
5437 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5438 mod_timer(&np->stats_poll,
5439 round_jiffies(jiffies + STATS_INTERVAL));
5441 spin_unlock_irq(&np->lock);
5449 static int nv_close(struct net_device *dev)
5451 struct fe_priv *np = netdev_priv(dev);
5454 spin_lock_irq(&np->lock);
5455 np->in_shutdown = 1;
5456 spin_unlock_irq(&np->lock);
5457 nv_napi_disable(dev);
5458 synchronize_irq(np->pci_dev->irq);
5460 del_timer_sync(&np->oom_kick);
5461 del_timer_sync(&np->nic_poll);
5462 del_timer_sync(&np->stats_poll);
5464 netif_stop_queue(dev);
5465 spin_lock_irq(&np->lock);
5469 /* disable interrupts on the nic or we will lock up */
5470 base = get_hwbase(dev);
5471 nv_disable_hw_interrupts(dev, np->irqmask);
5473 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
5475 spin_unlock_irq(&np->lock);
5481 if (np->wolenabled) {
5482 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5485 /* power down phy */
5486 mii_rw(dev, np->phyaddr, MII_BMCR,
5487 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
5490 /* FIXME: power down nic */
5495 static const struct net_device_ops nv_netdev_ops = {
5496 .ndo_open = nv_open,
5497 .ndo_stop = nv_close,
5498 .ndo_get_stats = nv_get_stats,
5499 .ndo_start_xmit = nv_start_xmit,
5500 .ndo_tx_timeout = nv_tx_timeout,
5501 .ndo_change_mtu = nv_change_mtu,
5502 .ndo_validate_addr = eth_validate_addr,
5503 .ndo_set_mac_address = nv_set_mac_address,
5504 .ndo_set_multicast_list = nv_set_multicast,
5505 .ndo_vlan_rx_register = nv_vlan_rx_register,
5506 #ifdef CONFIG_NET_POLL_CONTROLLER
5507 .ndo_poll_controller = nv_poll_controller,
5511 static const struct net_device_ops nv_netdev_ops_optimized = {
5512 .ndo_open = nv_open,
5513 .ndo_stop = nv_close,
5514 .ndo_get_stats = nv_get_stats,
5515 .ndo_start_xmit = nv_start_xmit_optimized,
5516 .ndo_tx_timeout = nv_tx_timeout,
5517 .ndo_change_mtu = nv_change_mtu,
5518 .ndo_validate_addr = eth_validate_addr,
5519 .ndo_set_mac_address = nv_set_mac_address,
5520 .ndo_set_multicast_list = nv_set_multicast,
5521 .ndo_vlan_rx_register = nv_vlan_rx_register,
5522 #ifdef CONFIG_NET_POLL_CONTROLLER
5523 .ndo_poll_controller = nv_poll_controller,
5527 static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
5529 struct net_device *dev;
5534 u32 powerstate, txreg;
5535 u32 phystate_orig = 0, phystate;
5536 int phyinitialized = 0;
5537 static int printed_version;
5539 if (!printed_version++)
5540 printk(KERN_INFO "%s: Reverse Engineered nForce ethernet"
5541 " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION);
5543 dev = alloc_etherdev(sizeof(struct fe_priv));
5548 np = netdev_priv(dev);
5550 np->pci_dev = pci_dev;
5551 spin_lock_init(&np->lock);
5552 SET_NETDEV_DEV(dev, &pci_dev->dev);
5554 init_timer(&np->oom_kick);
5555 np->oom_kick.data = (unsigned long) dev;
5556 np->oom_kick.function = &nv_do_rx_refill; /* timer handler */
5557 init_timer(&np->nic_poll);
5558 np->nic_poll.data = (unsigned long) dev;
5559 np->nic_poll.function = &nv_do_nic_poll; /* timer handler */
5560 init_timer(&np->stats_poll);
5561 np->stats_poll.data = (unsigned long) dev;
5562 np->stats_poll.function = &nv_do_stats_poll; /* timer handler */
5564 err = pci_enable_device(pci_dev);
5568 pci_set_master(pci_dev);
5570 err = pci_request_regions(pci_dev, DRV_NAME);
5574 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5575 np->register_size = NV_PCI_REGSZ_VER3;
5576 else if (id->driver_data & DEV_HAS_STATISTICS_V1)
5577 np->register_size = NV_PCI_REGSZ_VER2;
5579 np->register_size = NV_PCI_REGSZ_VER1;
5583 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5584 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
5585 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
5586 pci_resource_len(pci_dev, i),
5587 pci_resource_flags(pci_dev, i));
5588 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
5589 pci_resource_len(pci_dev, i) >= np->register_size) {
5590 addr = pci_resource_start(pci_dev, i);
5594 if (i == DEVICE_COUNT_RESOURCE) {
5595 dev_printk(KERN_INFO, &pci_dev->dev,
5596 "Couldn't find register window\n");
5600 /* copy of driver data */
5601 np->driver_data = id->driver_data;
5602 /* copy of device id */
5603 np->device_id = id->device;
5605 /* handle different descriptor versions */
5606 if (id->driver_data & DEV_HAS_HIGH_DMA) {
5607 /* packet format 3: supports 40-bit addressing */
5608 np->desc_ver = DESC_VER_3;
5609 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
5611 if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK))
5612 dev_printk(KERN_INFO, &pci_dev->dev,
5613 "64-bit DMA failed, using 32-bit addressing\n");
5615 dev->features |= NETIF_F_HIGHDMA;
5616 if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
5617 dev_printk(KERN_INFO, &pci_dev->dev,
5618 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
5621 } else if (id->driver_data & DEV_HAS_LARGEDESC) {
5622 /* packet format 2: supports jumbo frames */
5623 np->desc_ver = DESC_VER_2;
5624 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
5626 /* original packet format */
5627 np->desc_ver = DESC_VER_1;
5628 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
5631 np->pkt_limit = NV_PKTLIMIT_1;
5632 if (id->driver_data & DEV_HAS_LARGEDESC)
5633 np->pkt_limit = NV_PKTLIMIT_2;
5635 if (id->driver_data & DEV_HAS_CHECKSUM) {
5637 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5638 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
5639 dev->features |= NETIF_F_TSO;
5642 np->vlanctl_bits = 0;
5643 if (id->driver_data & DEV_HAS_VLAN) {
5644 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
5645 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
5649 if ((id->driver_data & DEV_HAS_MSI) && msi) {
5650 np->msi_flags |= NV_MSI_CAPABLE;
5652 if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
5653 /* msi-x has had reported issues when the irqmask is modified,
5654 as happens with napi; therefore it is disabled for now
5656 #ifndef CONFIG_FORCEDETH_NAPI
5657 np->msi_flags |= NV_MSI_X_CAPABLE;
5661 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
5662 if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
5663 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
5664 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
5665 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
5670 np->base = ioremap(addr, np->register_size);
5673 dev->base_addr = (unsigned long)np->base;
5675 dev->irq = pci_dev->irq;
5677 np->rx_ring_size = RX_RING_DEFAULT;
5678 np->tx_ring_size = TX_RING_DEFAULT;
5680 if (!nv_optimized(np)) {
5681 np->rx_ring.orig = pci_alloc_consistent(pci_dev,
5682 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
5684 if (!np->rx_ring.orig)
5686 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
5688 np->rx_ring.ex = pci_alloc_consistent(pci_dev,
5689 sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
5691 if (!np->rx_ring.ex)
5693 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
5695 np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5696 np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5697 if (!np->rx_skb || !np->tx_skb)
5700 if (!nv_optimized(np))
5701 dev->netdev_ops = &nv_netdev_ops;
5703 dev->netdev_ops = &nv_netdev_ops_optimized;
5705 #ifdef CONFIG_FORCEDETH_NAPI
5706 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
5708 SET_ETHTOOL_OPS(dev, &ops);
5709 dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
5711 pci_set_drvdata(pci_dev, dev);
5713 /* read the mac address */
5714 base = get_hwbase(dev);
5715 np->orig_mac[0] = readl(base + NvRegMacAddrA);
5716 np->orig_mac[1] = readl(base + NvRegMacAddrB);
5718 /* check the workaround bit for correct mac address order */
5719 txreg = readl(base + NvRegTransmitPoll);
5720 if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
5721 /* mac address is already in correct order */
5722 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
5723 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
5724 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5725 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5726 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
5727 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
5728 } else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
5729 /* mac address is already in correct order */
5730 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
5731 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
5732 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5733 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5734 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
5735 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
5737 * Set orig mac address back to the reversed version.
5738 * The MAC_ADDR_REV flag will be cleared during a low power transition.
5739 * Therefore, we should always put back the reversed address.
5741 np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
5742 (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
5743 np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
5745 /* need to reverse mac address to correct order */
5746 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
5747 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
5748 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
5749 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
5750 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
5751 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
5752 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5753 printk(KERN_DEBUG "nv_probe: set workaround bit for reversed mac addr\n");
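/* Illustrative example (hypothetical MAC 00:11:22:33:44:55): the
* reversed registers would hold orig_mac[0] = 0x22334455 and
* orig_mac[1] = 0x00000011, which the byte swizzling above turns back
* into 00:11:22:33:44:55. */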
5755 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5757 if (!is_valid_ether_addr(dev->perm_addr)) {
5759 * Bad mac address. At least one bios sets the mac address
5760 * to 01:23:45:67:89:ab
5762 dev_printk(KERN_ERR, &pci_dev->dev,
5763 "Invalid Mac address detected: %pM\n",
5765 dev_printk(KERN_ERR, &pci_dev->dev,
5766 "Please complain to your hardware vendor. Switching to a random MAC.\n");
5767 dev->dev_addr[0] = 0x00;
5768 dev->dev_addr[1] = 0x00;
5769 dev->dev_addr[2] = 0x6c;
5770 get_random_bytes(&dev->dev_addr[3], 3);
5773 dprintk(KERN_DEBUG "%s: MAC Address %pM\n",
5774 pci_name(pci_dev), dev->dev_addr);
5776 /* set mac address */
5777 nv_copy_mac_to_hw(dev);
5779 /* Workaround current PCI init glitch: wakeup bits aren't
5780 * being set from PCI PM capability.
5782 device_init_wakeup(&pci_dev->dev, 1);
5785 writel(0, base + NvRegWakeUpFlags);
5788 if (id->driver_data & DEV_HAS_POWER_CNTRL) {
5790 /* take phy and nic out of low power mode */
5791 powerstate = readl(base + NvRegPowerState2);
5792 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
5793 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
5794 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
5795 pci_dev->revision >= 0xA3)
5796 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
5797 writel(powerstate, base + NvRegPowerState2);
5800 if (np->desc_ver == DESC_VER_1) {
5801 np->tx_flags = NV_TX_VALID;
5803 np->tx_flags = NV_TX2_VALID;
5805 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
5806 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5807 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5808 np->msi_flags |= 0x0003;
5810 np->irqmask = NVREG_IRQMASK_CPU;
5811 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5812 np->msi_flags |= 0x0001;
5815 if (id->driver_data & DEV_NEED_TIMERIRQ)
5816 np->irqmask |= NVREG_IRQ_TIMER;
5817 if (id->driver_data & DEV_NEED_LINKTIMER) {
5818 dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
5819 np->need_linktimer = 1;
5820 np->link_timeout = jiffies + LINK_TIMEOUT;
5822 dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
5823 np->need_linktimer = 0;
5826 /* Limit the number of outstanding tx descriptors to work around a hw bug */
5827 if (id->driver_data & DEV_NEED_TX_LIMIT) {
5829 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_32 ||
5830 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_33 ||
5831 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_34 ||
5832 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_35 ||
5833 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_36 ||
5834 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_37 ||
5835 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_38 ||
5836 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_39) &&
5837 pci_dev->revision >= 0xA2)
5841 /* clear phy state and temporarily halt phy interrupts */
5842 writel(0, base + NvRegMIIMask);
5843 phystate = readl(base + NvRegAdapterControl);
5844 if (phystate & NVREG_ADAPTCTL_RUNNING) {
5846 phystate &= ~NVREG_ADAPTCTL_RUNNING;
5847 writel(phystate, base + NvRegAdapterControl);
5849 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5851 if (id->driver_data & DEV_HAS_MGMT_UNIT) {
5852 /* management unit running on the mac? */
5853 if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
5854 (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
5855 nv_mgmt_acquire_sema(dev) &&
5856 nv_mgmt_get_version(dev)) {
5858 if (np->mgmt_version > 0) {
5859 np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
5861 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n",
5862 pci_name(pci_dev), np->mac_in_use);
5863 /* management unit setup the phy already? */
5864 if (np->mac_in_use &&
5865 ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
5866 NVREG_XMITCTL_SYNC_PHY_INIT)) {
5867 /* phy is inited by mgmt unit */
5869 dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n",
5872 /* we need to init the phy */
5877 /* find a suitable phy */
5878 for (i = 1; i <= 32; i++) {
5880 int phyaddr = i & 0x1F;
5882 spin_lock_irq(&np->lock);
5883 id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
5884 spin_unlock_irq(&np->lock);
5885 if (id1 < 0 || id1 == 0xffff)
5887 spin_lock_irq(&np->lock);
5888 id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
5889 spin_unlock_irq(&np->lock);
5890 if (id2 < 0 || id2 == 0xffff)
5893 np->phy_model = id2 & PHYID2_MODEL_MASK;
5894 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
5895 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
5896 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
5897 pci_name(pci_dev), id1, id2, phyaddr);
5898 np->phyaddr = phyaddr;
5899 np->phy_oui = id1 | id2;
5901 /* Realtek hardcoded phy id1 to all zeros on certain phys */
5902 if (np->phy_oui == PHY_OUI_REALTEK2)
5903 np->phy_oui = PHY_OUI_REALTEK;
5904 /* Setup phy revision for Realtek */
5905 if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
5906 np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;
5911 dev_printk(KERN_INFO, &pci_dev->dev,
5912 "open: Could not find a valid PHY.\n");
5916 if (!phyinitialized) {
5920 /* see if it is a gigabit phy */
5921 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5922 if (mii_status & PHY_GIGABIT) {
5923 np->gigabit = PHY_GIGABIT;
5927 /* set default link speed settings */
5928 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
5932 err = register_netdev(dev);
5934 dev_printk(KERN_INFO, &pci_dev->dev,
5935 "unable to register netdev: %d\n", err);
5939 dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, "
5940 "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
5951 dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
5952 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
5953 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
5955 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
5957 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
5958 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
5959 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
5960 np->gigabit == PHY_GIGABIT ? "gbit " : "",
5961 np->need_linktimer ? "lnktim " : "",
5962 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
5963 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
5970 writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
5971 pci_set_drvdata(pci_dev, NULL);
5975 iounmap(get_hwbase(dev));
5977 pci_release_regions(pci_dev);
5979 pci_disable_device(pci_dev);
5986 static void nv_restore_phy(struct net_device *dev)
5988 struct fe_priv *np = netdev_priv(dev);
5989 u16 phy_reserved, mii_control;
5991 if (np->phy_oui == PHY_OUI_REALTEK &&
5992 np->phy_model == PHY_MODEL_REALTEK_8201 &&
5993 phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
5994 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
5995 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
5996 phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
5997 phy_reserved |= PHY_REALTEK_INIT8;
5998 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
5999 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);
6001 /* restart auto negotiation */
6002 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
6003 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
6004 mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
6008 static void nv_restore_mac_addr(struct pci_dev *pci_dev)
6010 struct net_device *dev = pci_get_drvdata(pci_dev);
6011 struct fe_priv *np = netdev_priv(dev);
6012 u8 __iomem *base = get_hwbase(dev);
6014 /* special op: write back the misordered MAC address - otherwise
6015 * the next nv_probe would see a wrong address.
6017 writel(np->orig_mac[0], base + NvRegMacAddrA);
6018 writel(np->orig_mac[1], base + NvRegMacAddrB);
6019 writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
6020 base + NvRegTransmitPoll);
6023 static void __devexit nv_remove(struct pci_dev *pci_dev)
6025 struct net_device *dev = pci_get_drvdata(pci_dev);
6027 unregister_netdev(dev);
6029 nv_restore_mac_addr(pci_dev);
6031 /* restore any phy related changes */
6032 nv_restore_phy(dev);
6034 nv_mgmt_release_sema(dev);
6036 /* free all structures */
6038 iounmap(get_hwbase(dev));
6039 pci_release_regions(pci_dev);
6040 pci_disable_device(pci_dev);
6042 pci_set_drvdata(pci_dev, NULL);

#ifdef CONFIG_PM
static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i;

	if (netif_running(dev)) {
		nv_close(dev);
	}
	netif_device_detach(dev);

	/* save non-pci configuration space */
	for (i = 0; i <= np->register_size/sizeof(u32); i++)
		np->saved_config_space[i] = readl(base + i*sizeof(u32));

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int nv_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int i, rc = 0;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, PCI_D0, 0);

	/* restore non-pci configuration space */
	for (i = 0; i <= np->register_size/sizeof(u32); i++)
		writel(np->saved_config_space[i], base + i*sizeof(u32));

	pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);

	netif_device_attach(dev);
	if (netif_running(dev)) {
		rc = nv_open(dev);
		nv_set_multicast(dev);
	}
	return rc;
}

static void nv_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);

	if (netif_running(dev))
		nv_close(dev);

	/*
	 * Restore the MAC so a kernel started by kexec won't get confused.
	 * If we really go for poweroff, we must not restore the MAC,
	 * otherwise the MAC for WOL will be reversed at least on some boards.
	 */
	if (system_state != SYSTEM_POWER_OFF) {
		nv_restore_mac_addr(pdev);
	}

	pci_disable_device(pdev);
	/*
	 * Apparently it is not possible to reinitialise from D3 hot,
	 * only put the device into D3 if we really go for poweroff.
	 */
	if (system_state == SYSTEM_POWER_OFF) {
		/* try to arm D3cold wake first, fall back to D3hot */
		if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled))
			pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
#else
#define nv_suspend NULL
#define nv_shutdown NULL
#define nv_resume NULL
#endif /* CONFIG_PM */

static struct pci_device_id pci_tbl[] = {
	{	/* nForce Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce2 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
	},
	{0,},
};
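
/*
 * Sketch of how a new board would be hooked up (hypothetical: 0xfffe is not
 * a real NVIDIA device id, and the flag set is only an example). One more
 * entry goes in front of the {0,} terminator above, with the chip's feature
 * set described by DEV_* flags:
 *
 *	{	PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0xfffe),
 *		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
 *	},
 */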

static struct pci_driver driver = {
	.name		= DRV_NAME,
	.id_table	= pci_tbl,
	.probe		= nv_probe,
	.remove		= __devexit_p(nv_remove),
	.suspend	= nv_suspend,
	.resume		= nv_resume,
	.shutdown	= nv_shutdown,
};

static int __init init_nic(void)
{
	return pci_register_driver(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}

module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx and rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval between timer interrupts; the register value is (time_in_micro_secs * 100) / (2^10). Min 0, max 65535.");
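/*
 * Worked example for the formula above (illustrative): poll_interval = 970
 * gives 970 * 2^10 / 100 ~= 9933 microseconds between timer interrupts,
 * i.e. roughly 100 interrupts per second.
 */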
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSI-X interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High (64-bit) DMA is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_cross, int, 0);
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
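
/*
 * Example invocation (illustrative values only):
 *
 *	modprobe forcedeth optimization_mode=1 poll_interval=970 msi=1
 *
 * loads the driver in CPU/timer mode with a ~10 ms poll period and MSI
 * interrupts enabled.
 */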

MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);