2 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
4 * Note: This driver is a cleanroom reimplementation based on reverse
5 * engineered documentation written by Carl-Daniel Hailfinger
6 * and Andrew de Quincey.
8 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
9 * trademarks of NVIDIA Corporation in the United States and other
12 * Copyright (C) 2003,4,5 Manfred Spraul
13 * Copyright (C) 2004 Andrew de Quincey (wol support)
14 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
15 * IRQ rate fixes, bigendian fixes, cleanups, verification)
16 * Copyright (c) 2004,5,6 NVIDIA Corporation
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2 of the License, or
21 * (at your option) any later version.
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
33 * 0.01: 05 Oct 2003: First release that compiles without warnings.
34 * 0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
35 * Check all PCI BARs for the register window.
36 * udelay added to mii_rw.
37 * 0.03: 06 Oct 2003: Initialize dev->irq.
38 * 0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
39 * 0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
40 * 0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
42 * 0.07: 14 Oct 2003: Further irq mask updates.
43 * 0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
44 * added into irq handler, NULL check for drain_ring.
45 * 0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
46 * requested interrupt sources.
47 * 0.10: 20 Oct 2003: First cleanup for release.
48 * 0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
49 * MAC Address init fix, set_multicast cleanup.
50 * 0.12: 23 Oct 2003: Cleanups for release.
51 * 0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
52 * Set link speed correctly. start rx before starting
53 * tx (nv_start_rx sets the link speed).
54 * 0.14: 25 Oct 2003: Nic dependent irq mask.
55 * 0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
57 * 0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
58 * increased to 1628 bytes.
59 * 0.17: 16 Nov 2003: undo rx buffer size increase. Subtract 1 from
61 * 0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
62 * 0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
63 * addresses, really stop rx if already running
64 * in nv_start_rx, clean up a bit.
65 * 0.20: 07 Dec 2003: alloc fixes
66 * 0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
67 * 0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
69 * 0.23: 26 Jan 2004: various small cleanups
70 * 0.24: 27 Feb 2004: make driver even less anonymous in backtraces
71 * 0.25: 09 Mar 2004: wol support
72 * 0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
73 * 0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
74 * added CK804/MCP04 device IDs, code fixes
75 * for registers, link status and other minor fixes.
76 * 0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
77 * 0.29: 31 Aug 2004: Add backup timer for link change notification.
78 * 0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
79 * into nv_close, otherwise reenabling for wol can
80 * cause DMA to kfree'd memory.
81 * 0.31: 14 Nov 2004: ethtool support for getting/setting link
83 * 0.32: 16 Apr 2005: RX_ERROR4 handling added.
84 * 0.33: 16 May 2005: Support for MCP51 added.
85 * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
86 * 0.35: 26 Jun 2005: Support for MCP55 added.
87 * 0.36: 28 Jun 2005: Add jumbo frame support.
88 * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
89 * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
91 * 0.39: 18 Jul 2005: Add 64bit descriptor support.
92 * 0.40: 19 Jul 2005: Add support for mac address change.
93 * 0.41: 30 Jul 2005: Write back original MAC in nv_close instead
95 * 0.42: 06 Aug 2005: Fix lack of link speed initialization
96 * in the second (and later) nv_open call
97 * 0.43: 10 Aug 2005: Add support for tx checksum.
98 * 0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
99 * 0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
100 * 0.46: 20 Oct 2005: Add irq optimization modes.
101 * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
102 * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
103 * 0.49: 10 Dec 2005: Fix tso for large buffers.
104 * 0.50: 20 Jan 2006: Add 802.1pq tagging support.
105 * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
106 * 0.52: 20 Jan 2006: Add MSI/MSIX support.
107 * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
108 * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
109 * 0.55: 22 Mar 2006: Add flow control (pause frame).
110 * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
111 * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
112 * 0.58: 30 Oct 2006: Added support for sideband management unit.
113 * 0.59: 30 Oct 2006: Added support for recoverable error.
114 * 0.60: 20 Jan 2007: Code optimizations for rings, rx & tx data paths, and stats.
117 * We suspect that on some hardware no TX done interrupts are generated.
118 * This means recovery from netif_stop_queue only happens if the hw timer
119 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
120 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
121 * If your hardware reliably generates tx done interrupts, then you can remove
122 * DEV_NEED_TIMERIRQ from the driver_data flags.
123 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
124 * superfluous timer interrupts from the nic.
126 #ifdef CONFIG_FORCEDETH_NAPI
127 #define DRIVERNAPI "-NAPI"
131 #define FORCEDETH_VERSION "0.60"
132 #define DRV_NAME "forcedeth"
134 #include <linux/module.h>
135 #include <linux/types.h>
136 #include <linux/pci.h>
137 #include <linux/interrupt.h>
138 #include <linux/netdevice.h>
139 #include <linux/etherdevice.h>
140 #include <linux/delay.h>
141 #include <linux/spinlock.h>
142 #include <linux/ethtool.h>
143 #include <linux/timer.h>
144 #include <linux/skbuff.h>
145 #include <linux/mii.h>
146 #include <linux/random.h>
147 #include <linux/init.h>
148 #include <linux/if_vlan.h>
149 #include <linux/dma-mapping.h>
153 #include <asm/uaccess.h>
154 #include <asm/system.h>
157 #define dprintk printk
159 #define dprintk(x...) do { } while (0)
167 #define DEV_NEED_TIMERIRQ 0x0001 /* set the timer irq flag in the irq mask */
168 #define DEV_NEED_LINKTIMER 0x0002 /* poll link settings. Relies on the timer irq */
169 #define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */
170 #define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */
171 #define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */
172 #define DEV_HAS_VLAN 0x0020 /* device supports vlan tagging and stripping */
173 #define DEV_HAS_MSI 0x0040 /* device supports MSI */
174 #define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */
175 #define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */
176 #define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */
177 #define DEV_HAS_STATISTICS_V1 0x0400 /* device supports hw statistics version 1 */
178 #define DEV_HAS_STATISTICS_V2 0x0800 /* device supports hw statistics version 2 */
179 #define DEV_HAS_TEST_EXTENDED 0x1000 /* device supports extended diagnostic test */
180 #define DEV_HAS_MGMT_UNIT 0x2000 /* device supports management unit */
183 NvRegIrqStatus = 0x000,
184 #define NVREG_IRQSTAT_MIIEVENT 0x040
185 #define NVREG_IRQSTAT_MASK 0x81ff
186 NvRegIrqMask = 0x004,
187 #define NVREG_IRQ_RX_ERROR 0x0001
188 #define NVREG_IRQ_RX 0x0002
189 #define NVREG_IRQ_RX_NOBUF 0x0004
190 #define NVREG_IRQ_TX_ERR 0x0008
191 #define NVREG_IRQ_TX_OK 0x0010
192 #define NVREG_IRQ_TIMER 0x0020
193 #define NVREG_IRQ_LINK 0x0040
194 #define NVREG_IRQ_RX_FORCED 0x0080
195 #define NVREG_IRQ_TX_FORCED 0x0100
196 #define NVREG_IRQ_RECOVER_ERROR 0x8000
197 #define NVREG_IRQMASK_THROUGHPUT 0x00df
198 #define NVREG_IRQMASK_CPU 0x0060
199 #define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
200 #define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
201 #define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
203 #define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
204 NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
205 NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR))
207 NvRegUnknownSetupReg6 = 0x008,
208 #define NVREG_UNKSETUP6_VAL 3
211 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
212 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
214 NvRegPollingInterval = 0x00c,
215 #define NVREG_POLL_DEFAULT_THROUGHPUT 970 /* backup tx cleanup if loop max reached */
216 #define NVREG_POLL_DEFAULT_CPU 13
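/*
 * Worked examples of the poll_interval formula further below,
 * value = (time_in_micro_secs * 100) / (2^10):
 * 970 -> ~9.9 ms, 97 -> ~1 ms, 13 -> ~133 usec.
 */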
217 NvRegMSIMap0 = 0x020,
218 NvRegMSIMap1 = 0x024,
219 NvRegMSIIrqMask = 0x030,
220 #define NVREG_MSI_VECTOR_0_ENABLED 0x01
222 #define NVREG_MISC1_PAUSE_TX 0x01
223 #define NVREG_MISC1_HD 0x02
224 #define NVREG_MISC1_FORCE 0x3b0f3c
226 NvRegMacReset = 0x3c,
227 #define NVREG_MAC_RESET_ASSERT 0x0F3
228 NvRegTransmitterControl = 0x084,
229 #define NVREG_XMITCTL_START 0x01
230 #define NVREG_XMITCTL_MGMT_ST 0x40000000
231 #define NVREG_XMITCTL_SYNC_MASK 0x000f0000
232 #define NVREG_XMITCTL_SYNC_NOT_READY 0x0
233 #define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000
234 #define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00
235 #define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0
236 #define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000
237 #define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000
238 #define NVREG_XMITCTL_HOST_LOADED 0x00004000
239 #define NVREG_XMITCTL_TX_PATH_EN 0x01000000
240 NvRegTransmitterStatus = 0x088,
241 #define NVREG_XMITSTAT_BUSY 0x01
243 NvRegPacketFilterFlags = 0x8c,
244 #define NVREG_PFF_PAUSE_RX 0x08
245 #define NVREG_PFF_ALWAYS 0x7F0000
246 #define NVREG_PFF_PROMISC 0x80
247 #define NVREG_PFF_MYADDR 0x20
248 #define NVREG_PFF_LOOPBACK 0x10
250 NvRegOffloadConfig = 0x90,
251 #define NVREG_OFFLOAD_HOMEPHY 0x601
252 #define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE
253 NvRegReceiverControl = 0x094,
254 #define NVREG_RCVCTL_START 0x01
255 #define NVREG_RCVCTL_RX_PATH_EN 0x01000000
256 NvRegReceiverStatus = 0x98,
257 #define NVREG_RCVSTAT_BUSY 0x01
259 NvRegRandomSeed = 0x9c,
260 #define NVREG_RNDSEED_MASK 0x00ff
261 #define NVREG_RNDSEED_FORCE 0x7f00
262 #define NVREG_RNDSEED_FORCE2 0x2d00
263 #define NVREG_RNDSEED_FORCE3 0x7400
265 NvRegTxDeferral = 0xA0,
266 #define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
267 #define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
268 #define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
269 NvRegRxDeferral = 0xA4,
270 #define NVREG_RX_DEFERRAL_DEFAULT 0x16
271 NvRegMacAddrA = 0xA8,
272 NvRegMacAddrB = 0xAC,
273 NvRegMulticastAddrA = 0xB0,
274 #define NVREG_MCASTADDRA_FORCE 0x01
275 NvRegMulticastAddrB = 0xB4,
276 NvRegMulticastMaskA = 0xB8,
277 NvRegMulticastMaskB = 0xBC,
279 NvRegPhyInterface = 0xC0,
280 #define PHY_RGMII 0x10000000
282 NvRegTxRingPhysAddr = 0x100,
283 NvRegRxRingPhysAddr = 0x104,
284 NvRegRingSizes = 0x108,
285 #define NVREG_RINGSZ_TXSHIFT 0
286 #define NVREG_RINGSZ_RXSHIFT 16
287 NvRegTransmitPoll = 0x10c,
288 #define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000
289 NvRegLinkSpeed = 0x110,
290 #define NVREG_LINKSPEED_FORCE 0x10000
291 #define NVREG_LINKSPEED_10 1000
292 #define NVREG_LINKSPEED_100 100
293 #define NVREG_LINKSPEED_1000 50
294 #define NVREG_LINKSPEED_MASK (0xFFF)
295 NvRegUnknownSetupReg5 = 0x130,
296 #define NVREG_UNKSETUP5_BIT31 (1<<31)
297 NvRegTxWatermark = 0x13c,
298 #define NVREG_TX_WM_DESC1_DEFAULT 0x0200010
299 #define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000
300 #define NVREG_TX_WM_DESC2_3_1000 0xfe08000
301 NvRegTxRxControl = 0x144,
302 #define NVREG_TXRXCTL_KICK 0x0001
303 #define NVREG_TXRXCTL_BIT1 0x0002
304 #define NVREG_TXRXCTL_BIT2 0x0004
305 #define NVREG_TXRXCTL_IDLE 0x0008
306 #define NVREG_TXRXCTL_RESET 0x0010
307 #define NVREG_TXRXCTL_RXCHECK 0x0400
308 #define NVREG_TXRXCTL_DESC_1 0
309 #define NVREG_TXRXCTL_DESC_2 0x002100
310 #define NVREG_TXRXCTL_DESC_3 0xc02200
311 #define NVREG_TXRXCTL_VLANSTRIP 0x00040
312 #define NVREG_TXRXCTL_VLANINS 0x00080
313 NvRegTxRingPhysAddrHigh = 0x148,
314 NvRegRxRingPhysAddrHigh = 0x14C,
315 NvRegTxPauseFrame = 0x170,
316 #define NVREG_TX_PAUSEFRAME_DISABLE 0x1ff0080
317 #define NVREG_TX_PAUSEFRAME_ENABLE 0x0c00030
318 NvRegMIIStatus = 0x180,
319 #define NVREG_MIISTAT_ERROR 0x0001
320 #define NVREG_MIISTAT_LINKCHANGE 0x0008
321 #define NVREG_MIISTAT_MASK 0x000f
322 #define NVREG_MIISTAT_MASK2 0x000f
323 NvRegMIIMask = 0x184,
324 #define NVREG_MII_LINKCHANGE 0x0008
326 NvRegAdapterControl = 0x188,
327 #define NVREG_ADAPTCTL_START 0x02
328 #define NVREG_ADAPTCTL_LINKUP 0x04
329 #define NVREG_ADAPTCTL_PHYVALID 0x40000
330 #define NVREG_ADAPTCTL_RUNNING 0x100000
331 #define NVREG_ADAPTCTL_PHYSHIFT 24
332 NvRegMIISpeed = 0x18c,
333 #define NVREG_MIISPEED_BIT8 (1<<8)
334 #define NVREG_MIIDELAY 5
335 NvRegMIIControl = 0x190,
336 #define NVREG_MIICTL_INUSE 0x08000
337 #define NVREG_MIICTL_WRITE 0x00400
338 #define NVREG_MIICTL_ADDRSHIFT 5
339 NvRegMIIData = 0x194,
340 NvRegWakeUpFlags = 0x200,
341 #define NVREG_WAKEUPFLAGS_VAL 0x7770
342 #define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
343 #define NVREG_WAKEUPFLAGS_ENABLESHIFT 16
344 #define NVREG_WAKEUPFLAGS_D3SHIFT 12
345 #define NVREG_WAKEUPFLAGS_D2SHIFT 8
346 #define NVREG_WAKEUPFLAGS_D1SHIFT 4
347 #define NVREG_WAKEUPFLAGS_D0SHIFT 0
348 #define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01
349 #define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02
350 #define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04
351 #define NVREG_WAKEUPFLAGS_ENABLE 0x1111
353 NvRegPatternCRC = 0x204,
354 NvRegPatternMask = 0x208,
355 NvRegPowerCap = 0x268,
356 #define NVREG_POWERCAP_D3SUPP (1<<30)
357 #define NVREG_POWERCAP_D2SUPP (1<<26)
358 #define NVREG_POWERCAP_D1SUPP (1<<25)
359 NvRegPowerState = 0x26c,
360 #define NVREG_POWERSTATE_POWEREDUP 0x8000
361 #define NVREG_POWERSTATE_VALID 0x0100
362 #define NVREG_POWERSTATE_MASK 0x0003
363 #define NVREG_POWERSTATE_D0 0x0000
364 #define NVREG_POWERSTATE_D1 0x0001
365 #define NVREG_POWERSTATE_D2 0x0002
366 #define NVREG_POWERSTATE_D3 0x0003
368 NvRegTxZeroReXmt = 0x284,
369 NvRegTxOneReXmt = 0x288,
370 NvRegTxManyReXmt = 0x28c,
371 NvRegTxLateCol = 0x290,
372 NvRegTxUnderflow = 0x294,
373 NvRegTxLossCarrier = 0x298,
374 NvRegTxExcessDef = 0x29c,
375 NvRegTxRetryErr = 0x2a0,
376 NvRegRxFrameErr = 0x2a4,
377 NvRegRxExtraByte = 0x2a8,
378 NvRegRxLateCol = 0x2ac,
380 NvRegRxFrameTooLong = 0x2b4,
381 NvRegRxOverflow = 0x2b8,
382 NvRegRxFCSErr = 0x2bc,
383 NvRegRxFrameAlignErr = 0x2c0,
384 NvRegRxLenErr = 0x2c4,
385 NvRegRxUnicast = 0x2c8,
386 NvRegRxMulticast = 0x2cc,
387 NvRegRxBroadcast = 0x2d0,
389 NvRegTxFrame = 0x2d8,
391 NvRegTxPause = 0x2e0,
392 NvRegRxPause = 0x2e4,
393 NvRegRxDropFrame = 0x2e8,
394 NvRegVlanControl = 0x300,
395 #define NVREG_VLANCONTROL_ENABLE 0x2000
396 NvRegMSIXMap0 = 0x3e0,
397 NvRegMSIXMap1 = 0x3e4,
398 NvRegMSIXIrqStatus = 0x3f0,
400 NvRegPowerState2 = 0x600,
401 #define NVREG_POWERSTATE2_POWERUP_MASK 0x0F11
402 #define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001
405 /* Big endian: should work, but is untested */
411 struct ring_desc_ex {
419 struct ring_desc* orig;
420 struct ring_desc_ex* ex;
423 #define FLAG_MASK_V1 0xffff0000
424 #define FLAG_MASK_V2 0xffffc000
425 #define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
426 #define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
428 #define NV_TX_LASTPACKET (1<<16)
429 #define NV_TX_RETRYERROR (1<<19)
430 #define NV_TX_FORCED_INTERRUPT (1<<24)
431 #define NV_TX_DEFERRED (1<<26)
432 #define NV_TX_CARRIERLOST (1<<27)
433 #define NV_TX_LATECOLLISION (1<<28)
434 #define NV_TX_UNDERFLOW (1<<29)
435 #define NV_TX_ERROR (1<<30)
436 #define NV_TX_VALID (1<<31)
438 #define NV_TX2_LASTPACKET (1<<29)
439 #define NV_TX2_RETRYERROR (1<<18)
440 #define NV_TX2_FORCED_INTERRUPT (1<<30)
441 #define NV_TX2_DEFERRED (1<<25)
442 #define NV_TX2_CARRIERLOST (1<<26)
443 #define NV_TX2_LATECOLLISION (1<<27)
444 #define NV_TX2_UNDERFLOW (1<<28)
445 /* error and valid are the same for both */
446 #define NV_TX2_ERROR (1<<30)
447 #define NV_TX2_VALID (1<<31)
448 #define NV_TX2_TSO (1<<28)
449 #define NV_TX2_TSO_SHIFT 14
450 #define NV_TX2_TSO_MAX_SHIFT 14
451 #define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT)
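/* NV_TX2_TSO_MAX_SIZE is 1<<14 = 16384 bytes, the largest chunk that a
 * single descriptor carries; larger buffers are split in nv_start_xmit. */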
452 #define NV_TX2_CHECKSUM_L3 (1<<27)
453 #define NV_TX2_CHECKSUM_L4 (1<<26)
455 #define NV_TX3_VLAN_TAG_PRESENT (1<<18)
457 #define NV_RX_DESCRIPTORVALID (1<<16)
458 #define NV_RX_MISSEDFRAME (1<<17)
459 #define NV_RX_SUBSTRACT1 (1<<18)
460 #define NV_RX_ERROR1 (1<<23)
461 #define NV_RX_ERROR2 (1<<24)
462 #define NV_RX_ERROR3 (1<<25)
463 #define NV_RX_ERROR4 (1<<26)
464 #define NV_RX_CRCERR (1<<27)
465 #define NV_RX_OVERFLOW (1<<28)
466 #define NV_RX_FRAMINGERR (1<<29)
467 #define NV_RX_ERROR (1<<30)
468 #define NV_RX_AVAIL (1<<31)
470 #define NV_RX2_CHECKSUMMASK (0x1C000000)
471 #define NV_RX2_CHECKSUMOK1 (0x10000000)
472 #define NV_RX2_CHECKSUMOK2 (0x14000000)
473 #define NV_RX2_CHECKSUMOK3 (0x18000000)
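/* NV_RX2_CHECKSUMMASK covers a 3-bit field at bits 26..28 of the rx
 * flaglen word; the three OK values above are the field encodings that
 * indicate a good hardware checksum. */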
474 #define NV_RX2_DESCRIPTORVALID (1<<29)
475 #define NV_RX2_SUBSTRACT1 (1<<25)
476 #define NV_RX2_ERROR1 (1<<18)
477 #define NV_RX2_ERROR2 (1<<19)
478 #define NV_RX2_ERROR3 (1<<20)
479 #define NV_RX2_ERROR4 (1<<21)
480 #define NV_RX2_CRCERR (1<<22)
481 #define NV_RX2_OVERFLOW (1<<23)
482 #define NV_RX2_FRAMINGERR (1<<24)
483 /* error and avail are the same for both */
484 #define NV_RX2_ERROR (1<<30)
485 #define NV_RX2_AVAIL (1<<31)
487 #define NV_RX3_VLAN_TAG_PRESENT (1<<16)
488 #define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
490 /* Miscellaneous hardware-related defines: */
491 #define NV_PCI_REGSZ_VER1 0x270
492 #define NV_PCI_REGSZ_VER2 0x2d4
493 #define NV_PCI_REGSZ_VER3 0x604
495 /* various timeout delays: all in usec */
496 #define NV_TXRX_RESET_DELAY 4
497 #define NV_TXSTOP_DELAY1 10
498 #define NV_TXSTOP_DELAY1MAX 500000
499 #define NV_TXSTOP_DELAY2 100
500 #define NV_RXSTOP_DELAY1 10
501 #define NV_RXSTOP_DELAY1MAX 500000
502 #define NV_RXSTOP_DELAY2 100
503 #define NV_SETUP5_DELAY 5
504 #define NV_SETUP5_DELAYMAX 50000
505 #define NV_POWERUP_DELAY 5
506 #define NV_POWERUP_DELAYMAX 5000
507 #define NV_MIIBUSY_DELAY 50
508 #define NV_MIIPHY_DELAY 10
509 #define NV_MIIPHY_DELAYMAX 10000
510 #define NV_MAC_RESET_DELAY 64
512 #define NV_WAKEUPPATTERNS 5
513 #define NV_WAKEUPMASKENTRIES 4
515 /* General driver defaults */
516 #define NV_WATCHDOG_TIMEO (5*HZ)
518 #define RX_RING_DEFAULT 128
519 #define TX_RING_DEFAULT 256
520 #define RX_RING_MIN 128
521 #define TX_RING_MIN 64
522 #define RING_MAX_DESC_VER_1 1024
523 #define RING_MAX_DESC_VER_2_3 16384
525 /* rx/tx mac addr + type + vlan + align + slack */
526 #define NV_RX_HEADERS (64)
527 /* even more slack. */
528 #define NV_RX_ALLOC_PAD (64)
530 /* maximum mtu size */
531 #define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */
532 #define NV_PKTLIMIT_2 9100 /* Actual limit according to NVIDIA: 9202 */
534 #define OOM_REFILL (1+HZ/20)
535 #define POLL_WAIT (1+HZ/100)
536 #define LINK_TIMEOUT (3*HZ)
537 #define STATS_INTERVAL (10*HZ)
541 * The nic supports three different descriptor types:
542 * - DESC_VER_1: Original
543 * - DESC_VER_2: support for jumbo frames.
544 * - DESC_VER_3: 64-bit format.
551 #define PHY_OUI_MARVELL 0x5043
552 #define PHY_OUI_CICADA 0x03f1
553 #define PHY_OUI_VITESSE 0x01c1
554 #define PHYID1_OUI_MASK 0x03ff
555 #define PHYID1_OUI_SHFT 6
556 #define PHYID2_OUI_MASK 0xfc00
557 #define PHYID2_OUI_SHFT 10
558 #define PHYID2_MODEL_MASK 0x03f0
559 #define PHY_MODEL_MARVELL_E3016 0x220
560 #define PHY_MARVELL_E3016_INITMASK 0x0300
561 #define PHY_CICADA_INIT1 0x0f000
562 #define PHY_CICADA_INIT2 0x0e00
563 #define PHY_CICADA_INIT3 0x01000
564 #define PHY_CICADA_INIT4 0x0200
565 #define PHY_CICADA_INIT5 0x0004
566 #define PHY_CICADA_INIT6 0x02000
567 #define PHY_VITESSE_INIT_REG1 0x1f
568 #define PHY_VITESSE_INIT_REG2 0x10
569 #define PHY_VITESSE_INIT_REG3 0x11
570 #define PHY_VITESSE_INIT_REG4 0x12
571 #define PHY_VITESSE_INIT_MSK1 0xc
572 #define PHY_VITESSE_INIT_MSK2 0x0180
573 #define PHY_VITESSE_INIT1 0x52b5
574 #define PHY_VITESSE_INIT2 0xaf8a
575 #define PHY_VITESSE_INIT3 0x8
576 #define PHY_VITESSE_INIT4 0x8f8a
577 #define PHY_VITESSE_INIT5 0xaf86
578 #define PHY_VITESSE_INIT6 0x8f86
579 #define PHY_VITESSE_INIT7 0xaf82
580 #define PHY_VITESSE_INIT8 0x0100
581 #define PHY_VITESSE_INIT9 0x8f82
582 #define PHY_VITESSE_INIT10 0x0
584 #define PHY_GIGABIT 0x0100
586 #define PHY_TIMEOUT 0x1
587 #define PHY_ERROR 0x2
591 #define PHY_HALF 0x100
593 #define NV_PAUSEFRAME_RX_CAPABLE 0x0001
594 #define NV_PAUSEFRAME_TX_CAPABLE 0x0002
595 #define NV_PAUSEFRAME_RX_ENABLE 0x0004
596 #define NV_PAUSEFRAME_TX_ENABLE 0x0008
597 #define NV_PAUSEFRAME_RX_REQ 0x0010
598 #define NV_PAUSEFRAME_TX_REQ 0x0020
599 #define NV_PAUSEFRAME_AUTONEG 0x0040
601 /* MSI/MSI-X defines */
602 #define NV_MSI_X_MAX_VECTORS 8
603 #define NV_MSI_X_VECTORS_MASK 0x000f
604 #define NV_MSI_CAPABLE 0x0010
605 #define NV_MSI_X_CAPABLE 0x0020
606 #define NV_MSI_ENABLED 0x0040
607 #define NV_MSI_X_ENABLED 0x0080
609 #define NV_MSI_X_VECTOR_ALL 0x0
610 #define NV_MSI_X_VECTOR_RX 0x0
611 #define NV_MSI_X_VECTOR_TX 0x1
612 #define NV_MSI_X_VECTOR_OTHER 0x2
615 struct nv_ethtool_str {
616 char name[ETH_GSTRING_LEN];
619 static const struct nv_ethtool_str nv_estats_str[] = {
624 { "tx_late_collision" },
625 { "tx_fifo_errors" },
626 { "tx_carrier_errors" },
627 { "tx_excess_deferral" },
628 { "tx_retry_error" },
629 { "rx_frame_error" },
631 { "rx_late_collision" },
633 { "rx_frame_too_long" },
634 { "rx_over_errors" },
636 { "rx_frame_align_error" },
637 { "rx_length_error" },
642 { "rx_errors_total" },
643 { "tx_errors_total" },
645 /* version 2 stats */
654 struct nv_ethtool_stats {
659 u64 tx_late_collision;
661 u64 tx_carrier_errors;
662 u64 tx_excess_deferral;
666 u64 rx_late_collision;
668 u64 rx_frame_too_long;
671 u64 rx_frame_align_error;
680 /* version 2 stats */
689 #define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
690 #define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
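/* The V1 count works because the six version-2 counters sit at the end
 * of struct nv_ethtool_stats and can simply be sliced off. */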
693 #define NV_TEST_COUNT_BASE 3
694 #define NV_TEST_COUNT_EXTENDED 4
696 static const struct nv_ethtool_str nv_etests_str[] = {
697 { "link (online/offline)" },
698 { "register (offline) " },
699 { "interrupt (offline) " },
700 { "loopback (offline) " }
703 struct register_test {
708 static const struct register_test nv_registers_test[] = {
709 { NvRegUnknownSetupReg6, 0x01 },
710 { NvRegMisc1, 0x03c },
711 { NvRegOffloadConfig, 0x03ff },
712 { NvRegMulticastAddrA, 0xffffffff },
713 { NvRegTxWatermark, 0x0ff },
714 { NvRegWakeUpFlags, 0x07777 },
721 unsigned int dma_len;
726 * All hardware access under dev->priv->lock, except the performance-critical parts of the operation, which are not locked (see the comments below):
728 * - rx is (pseudo-) lockless: it relies on the single-threading provided
729 * by the arch code for interrupts.
730 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
731 * needs dev->priv->lock :-(
732 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
735 /* in dev: base, irq */
740 * Locking: spin_lock(&np->lock); */
741 struct net_device_stats stats;
742 struct nv_ethtool_stats estats;
750 unsigned int phy_oui;
751 unsigned int phy_model;
756 /* General data: RO fields */
757 dma_addr_t ring_addr;
758 struct pci_dev *pci_dev;
771 /* rx specific fields.
772 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
774 union ring_type get_rx, put_rx, first_rx, last_rx;
775 struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
776 struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
777 struct nv_skb_map *rx_skb;
779 union ring_type rx_ring;
780 unsigned int rx_buf_sz;
781 unsigned int pkt_limit;
782 struct timer_list oom_kick;
783 struct timer_list nic_poll;
784 struct timer_list stats_poll;
788 /* media detection workaround.
789 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
792 unsigned long link_timeout;
794 * tx specific fields.
796 union ring_type get_tx, put_tx, first_tx, last_tx;
797 struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
798 struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
799 struct nv_skb_map *tx_skb;
801 union ring_type tx_ring;
807 struct vlan_group *vlangrp;
809 /* msi/msi-x fields */
811 struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
818 * Maximum number of loops until we assume that a bit in the irq mask
819 * is stuck. Overridable with module param.
821 static int max_interrupt_work = 5;
824 * Optimization can be either throughput mode or CPU mode
826 * Throughput Mode: Every tx and rx packet will generate an interrupt.
827 * CPU Mode: Interrupts are controlled by a timer.
830 NV_OPTIMIZATION_MODE_THROUGHPUT,
831 NV_OPTIMIZATION_MODE_CPU
833 static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
836 * Poll interval for timer irq
838 * This interval determines how frequently an interrupt is generated.
839 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
840 * Min = 0, and Max = 65535
842 static int poll_interval = -1;
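/*
 * Illustrative helper (not part of the driver, name made up) showing the
 * conversion implied by the formula above:
 */
static inline u32 nv_usec_to_poll_interval(u32 usec)
{
	return (usec * 100) >> 10;	/* e.g. 1000 usec -> 97, i.e. ~1 ms */
}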
851 static int msi = NV_MSI_INT_ENABLED;
857 NV_MSIX_INT_DISABLED,
860 static int msix = NV_MSIX_INT_DISABLED;
866 NV_DMA_64BIT_DISABLED,
869 static int dma_64bit = NV_DMA_64BIT_ENABLED;
871 static inline struct fe_priv *get_nvpriv(struct net_device *dev)
873 return netdev_priv(dev);
876 static inline u8 __iomem *get_hwbase(struct net_device *dev)
878 return ((struct fe_priv *)netdev_priv(dev))->base;
881 static inline void pci_push(u8 __iomem *base)
883 /* force out pending posted writes */
887 static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
889 return le32_to_cpu(prd->flaglen)
890 & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
893 static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
895 return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
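/* flaglen packs the buffer length into its low bits and the NV_TX/NV_RX
 * flag bits into its high bits; the LEN_MASK_V* constants select the
 * length portion, as the two helpers above demonstrate. */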
898 static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
899 int delay, int delaymax, const char *msg)
901 u8 __iomem *base = get_hwbase(dev);
912 } while ((readl(base + offset) & mask) != target);
916 #define NV_SETUP_RX_RING 0x01
917 #define NV_SETUP_TX_RING 0x02
919 static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
921 struct fe_priv *np = get_nvpriv(dev);
922 u8 __iomem *base = get_hwbase(dev);
924 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
925 if (rxtx_flags & NV_SETUP_RX_RING) {
926 writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
928 if (rxtx_flags & NV_SETUP_TX_RING) {
929 writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
932 if (rxtx_flags & NV_SETUP_RX_RING) {
933 writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
934 writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
936 if (rxtx_flags & NV_SETUP_TX_RING) {
937 writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
938 writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
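/*
 * Layout assumed by setup_hw_rings and free_rings: rx and tx descriptors
 * share one contiguous DMA allocation, rx ring first, so the tx ring
 * starts at ring_addr + rx_ring_size * sizeof(descriptor).
 */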
943 static void free_rings(struct net_device *dev)
945 struct fe_priv *np = get_nvpriv(dev);
947 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
948 if (np->rx_ring.orig)
949 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
950 np->rx_ring.orig, np->ring_addr);
953 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
954 np->rx_ring.ex, np->ring_addr);
962 static int using_multi_irqs(struct net_device *dev)
964 struct fe_priv *np = get_nvpriv(dev);
966 if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
967 ((np->msi_flags & NV_MSI_X_ENABLED) &&
968 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
974 static void nv_enable_irq(struct net_device *dev)
976 struct fe_priv *np = get_nvpriv(dev);
978 if (!using_multi_irqs(dev)) {
979 if (np->msi_flags & NV_MSI_X_ENABLED)
980 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
982 enable_irq(dev->irq);
984 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
985 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
986 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
990 static void nv_disable_irq(struct net_device *dev)
992 struct fe_priv *np = get_nvpriv(dev);
994 if (!using_multi_irqs(dev)) {
995 if (np->msi_flags & NV_MSI_X_ENABLED)
996 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
998 disable_irq(dev->irq);
1000 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1001 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
1002 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
1006 /* In MSIX mode, a write to irqmask behaves as XOR */
1007 static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
1009 u8 __iomem *base = get_hwbase(dev);
1011 writel(mask, base + NvRegIrqMask);
1014 static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
1016 struct fe_priv *np = get_nvpriv(dev);
1017 u8 __iomem *base = get_hwbase(dev);
1019 if (np->msi_flags & NV_MSI_X_ENABLED) {
1020 writel(mask, base + NvRegIrqMask);
1022 if (np->msi_flags & NV_MSI_ENABLED)
1023 writel(0, base + NvRegMSIIrqMask);
1024 writel(0, base + NvRegIrqMask);
1028 #define MII_READ (-1)
1029 /* mii_rw: read/write a register on the PHY.
1031 * Caller must guarantee serialization
1033 static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
1035 u8 __iomem *base = get_hwbase(dev);
1039 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
1041 reg = readl(base + NvRegMIIControl);
1042 if (reg & NVREG_MIICTL_INUSE) {
1043 writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
1044 udelay(NV_MIIBUSY_DELAY);
1047 reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
1048 if (value != MII_READ) {
1049 writel(value, base + NvRegMIIData);
1050 reg |= NVREG_MIICTL_WRITE;
1052 writel(reg, base + NvRegMIIControl);
1054 if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
1055 NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
1056 dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
1057 dev->name, miireg, addr);
1059 } else if (value != MII_READ) {
1060 /* it was a write operation - fewer failures are detectable */
1061 dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
1062 dev->name, value, miireg, addr);
1064 } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
1065 dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
1066 dev->name, miireg, addr);
1069 retval = readl(base + NvRegMIIData);
1070 dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
1071 dev->name, miireg, addr, retval);
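/*
 * Typical usage (see phy_init below): pass MII_READ as the value to read
 * a register, e.g. mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); pass a
 * real value to write one. For writes, a nonzero return means failure.
 */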
1077 static int phy_reset(struct net_device *dev, u32 bmcr_setup)
1079 struct fe_priv *np = netdev_priv(dev);
1081 unsigned int tries = 0;
1083 miicontrol = BMCR_RESET | bmcr_setup;
1084 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
1088 /* wait for 500ms */
1091 /* must wait till reset is deasserted */
1092 while (miicontrol & BMCR_RESET) {
1094 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1095 /* FIXME: 100 tries seem excessive */
1102 static int phy_init(struct net_device *dev)
1104 struct fe_priv *np = get_nvpriv(dev);
1105 u8 __iomem *base = get_hwbase(dev);
1106 u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;
1108 /* phy errata for E3016 phy */
1109 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
1110 reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1111 reg &= ~PHY_MARVELL_E3016_INITMASK;
1112 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
1113 printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
1118 /* set advertise register */
1119 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1120 reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
1121 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
1122 printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
1126 /* get phy interface type */
1127 phyinterface = readl(base + NvRegPhyInterface);
1129 /* see if gigabit phy */
1130 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1131 if (mii_status & PHY_GIGABIT) {
1132 np->gigabit = PHY_GIGABIT;
1133 mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
1134 mii_control_1000 &= ~ADVERTISE_1000HALF;
1135 if (phyinterface & PHY_RGMII)
1136 mii_control_1000 |= ADVERTISE_1000FULL;
1138 mii_control_1000 &= ~ADVERTISE_1000FULL;
1140 if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
1141 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1148 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1149 mii_control |= BMCR_ANENABLE;
1152 * (certain phys need bmcr to be set up with reset)
1154 if (phy_reset(dev, mii_control)) {
1155 printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
1159 /* phy vendor specific configuration */
1160 if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
1161 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
1162 phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
1163 phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
1164 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
1165 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1168 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1169 phy_reserved |= PHY_CICADA_INIT5;
1170 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
1171 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1175 if (np->phy_oui == PHY_OUI_CICADA) {
1176 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
1177 phy_reserved |= PHY_CICADA_INIT6;
1178 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
1179 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1183 if (np->phy_oui == PHY_OUI_VITESSE) {
1184 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
1185 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1188 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
1189 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1192 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
1193 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
1194 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1197 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
1198 phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1199 phy_reserved |= PHY_VITESSE_INIT3;
1200 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
1201 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1204 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
1205 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1208 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
1209 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1212 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
1213 phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1214 phy_reserved |= PHY_VITESSE_INIT3;
1215 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
1216 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1219 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
1220 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
1221 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1224 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
1225 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1228 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
1229 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1232 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
1233 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
1234 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1237 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
1238 phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
1239 phy_reserved |= PHY_VITESSE_INIT8;
1240 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
1241 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1244 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
1245 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1248 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
1249 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1253 /* some phys clear out pause advertisement on reset, set it back */
1254 mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
1256 /* restart auto negotiation */
1257 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1258 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
1259 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1266 static void nv_start_rx(struct net_device *dev)
1268 struct fe_priv *np = netdev_priv(dev);
1269 u8 __iomem *base = get_hwbase(dev);
1270 u32 rx_ctrl = readl(base + NvRegReceiverControl);
1272 dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
1273 /* Already running? Stop it. */
1274 if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
1275 rx_ctrl &= ~NVREG_RCVCTL_START;
1276 writel(rx_ctrl, base + NvRegReceiverControl);
1279 writel(np->linkspeed, base + NvRegLinkSpeed);
1281 rx_ctrl |= NVREG_RCVCTL_START;
1283 rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
1284 writel(rx_ctrl, base + NvRegReceiverControl);
1285 dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
1286 dev->name, np->duplex, np->linkspeed);
1290 static void nv_stop_rx(struct net_device *dev)
1292 struct fe_priv *np = netdev_priv(dev);
1293 u8 __iomem *base = get_hwbase(dev);
1294 u32 rx_ctrl = readl(base + NvRegReceiverControl);
1296 dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
1297 if (!np->mac_in_use)
1298 rx_ctrl &= ~NVREG_RCVCTL_START;
1300 rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
1301 writel(rx_ctrl, base + NvRegReceiverControl);
1302 reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
1303 NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
1304 KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");
1306 udelay(NV_RXSTOP_DELAY2);
1307 if (!np->mac_in_use)
1308 writel(0, base + NvRegLinkSpeed);
1311 static void nv_start_tx(struct net_device *dev)
1313 struct fe_priv *np = netdev_priv(dev);
1314 u8 __iomem *base = get_hwbase(dev);
1315 u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1317 dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
1318 tx_ctrl |= NVREG_XMITCTL_START;
1320 tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
1321 writel(tx_ctrl, base + NvRegTransmitterControl);
1325 static void nv_stop_tx(struct net_device *dev)
1327 struct fe_priv *np = netdev_priv(dev);
1328 u8 __iomem *base = get_hwbase(dev);
1329 u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1331 dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
1332 if (!np->mac_in_use)
1333 tx_ctrl &= ~NVREG_XMITCTL_START;
1335 tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
1336 writel(tx_ctrl, base + NvRegTransmitterControl);
1337 reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
1338 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
1339 KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
1341 udelay(NV_TXSTOP_DELAY2);
1342 if (!np->mac_in_use)
1343 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
1344 base + NvRegTransmitPoll);
1347 static void nv_txrx_reset(struct net_device *dev)
1349 struct fe_priv *np = netdev_priv(dev);
1350 u8 __iomem *base = get_hwbase(dev);
1352 dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
1353 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1355 udelay(NV_TXRX_RESET_DELAY);
1356 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1360 static void nv_mac_reset(struct net_device *dev)
1362 struct fe_priv *np = netdev_priv(dev);
1363 u8 __iomem *base = get_hwbase(dev);
1365 dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
1366 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1368 writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
1370 udelay(NV_MAC_RESET_DELAY);
1371 writel(0, base + NvRegMacReset);
1373 udelay(NV_MAC_RESET_DELAY);
1374 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1378 static void nv_get_hw_stats(struct net_device *dev)
1380 struct fe_priv *np = netdev_priv(dev);
1381 u8 __iomem *base = get_hwbase(dev);
1383 np->estats.tx_bytes += readl(base + NvRegTxCnt);
1384 np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
1385 np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
1386 np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
1387 np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
1388 np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
1389 np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
1390 np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
1391 np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
1392 np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
1393 np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
1394 np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
1395 np->estats.rx_runt += readl(base + NvRegRxRunt);
1396 np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
1397 np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
1398 np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
1399 np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
1400 np->estats.rx_length_error += readl(base + NvRegRxLenErr);
1401 np->estats.rx_unicast += readl(base + NvRegRxUnicast);
1402 np->estats.rx_multicast += readl(base + NvRegRxMulticast);
1403 np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
1404 np->estats.rx_packets =
1405 np->estats.rx_unicast +
1406 np->estats.rx_multicast +
1407 np->estats.rx_broadcast;
1408 np->estats.rx_errors_total =
1409 np->estats.rx_crc_errors +
1410 np->estats.rx_over_errors +
1411 np->estats.rx_frame_error +
1412 (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
1413 np->estats.rx_late_collision +
1414 np->estats.rx_runt +
1415 np->estats.rx_frame_too_long;
1416 np->estats.tx_errors_total =
1417 np->estats.tx_late_collision +
1418 np->estats.tx_fifo_errors +
1419 np->estats.tx_carrier_errors +
1420 np->estats.tx_excess_deferral +
1421 np->estats.tx_retry_error;
1423 if (np->driver_data & DEV_HAS_STATISTICS_V2) {
1424 np->estats.tx_deferral += readl(base + NvRegTxDef);
1425 np->estats.tx_packets += readl(base + NvRegTxFrame);
1426 np->estats.rx_bytes += readl(base + NvRegRxCnt);
1427 np->estats.tx_pause += readl(base + NvRegTxPause);
1428 np->estats.rx_pause += readl(base + NvRegRxPause);
1429 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
1434 * nv_get_stats: dev->get_stats function
1435 * Get latest stats value from the nic.
1436 * Called with read_lock(&dev_base_lock) held for read -
1437 * only synchronized against unregister_netdevice.
1439 static struct net_device_stats *nv_get_stats(struct net_device *dev)
1441 struct fe_priv *np = netdev_priv(dev);
1443 /* If the nic supports hw counters then retrieve latest values */
1444 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) {
1445 nv_get_hw_stats(dev);
1447 /* copy to net_device stats */
1448 np->stats.tx_bytes = np->estats.tx_bytes;
1449 np->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
1450 np->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
1451 np->stats.rx_crc_errors = np->estats.rx_crc_errors;
1452 np->stats.rx_over_errors = np->estats.rx_over_errors;
1453 np->stats.rx_errors = np->estats.rx_errors_total;
1454 np->stats.tx_errors = np->estats.tx_errors_total;
1460 * nv_alloc_rx: fill rx ring entries.
1461 * Return 1 if the skb allocations failed and the
1462 * rx engine is left without available descriptors.
1464 static int nv_alloc_rx(struct net_device *dev)
1466 struct fe_priv *np = netdev_priv(dev);
1467 struct ring_desc* less_rx;
1469 less_rx = np->get_rx.orig;
1470 if (less_rx-- == np->first_rx.orig)
1471 less_rx = np->last_rx.orig;
1473 while (np->put_rx.orig != less_rx) {
1474 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1476 np->put_rx_ctx->skb = skb;
1477 np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
1480 PCI_DMA_FROMDEVICE);
1481 np->put_rx_ctx->dma_len = skb_tailroom(skb);
1482 np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
1484 np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
1485 if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
1486 np->put_rx.orig = np->first_rx.orig;
1487 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1488 np->put_rx_ctx = np->first_rx_ctx;
1496 static int nv_alloc_rx_optimized(struct net_device *dev)
1498 struct fe_priv *np = netdev_priv(dev);
1499 struct ring_desc_ex* less_rx;
1501 less_rx = np->get_rx.ex;
1502 if (less_rx-- == np->first_rx.ex)
1503 less_rx = np->last_rx.ex;
1505 while (np->put_rx.ex != less_rx) {
1506 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1508 np->put_rx_ctx->skb = skb;
1509 np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
1512 PCI_DMA_FROMDEVICE);
1513 np->put_rx_ctx->dma_len = skb_tailroom(skb);
1514 np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
1515 np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
1517 np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1518 if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
1519 np->put_rx.ex = np->first_rx.ex;
1520 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1521 np->put_rx_ctx = np->first_rx_ctx;
1529 /* If rx buffers are exhausted, this is called after 50ms to attempt a refill */
1530 #ifdef CONFIG_FORCEDETH_NAPI
1531 static void nv_do_rx_refill(unsigned long data)
1533 struct net_device *dev = (struct net_device *) data;
1535 /* Just reschedule NAPI rx processing */
1536 netif_rx_schedule(dev);
1539 static void nv_do_rx_refill(unsigned long data)
1541 struct net_device *dev = (struct net_device *) data;
1542 struct fe_priv *np = netdev_priv(dev);
1545 if (!using_multi_irqs(dev)) {
1546 if (np->msi_flags & NV_MSI_X_ENABLED)
1547 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1549 disable_irq(dev->irq);
1551 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1553 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1554 retcode = nv_alloc_rx(dev);
1556 retcode = nv_alloc_rx_optimized(dev);
1558 spin_lock_irq(&np->lock);
1559 if (!np->in_shutdown)
1560 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
1561 spin_unlock_irq(&np->lock);
1563 if (!using_multi_irqs(dev)) {
1564 if (np->msi_flags & NV_MSI_X_ENABLED)
1565 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1567 enable_irq(dev->irq);
1569 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1574 static void nv_init_rx(struct net_device *dev)
1576 struct fe_priv *np = netdev_priv(dev);
1578 np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
1579 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1580 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
1582 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
1583 np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
1584 np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
1586 for (i = 0; i < np->rx_ring_size; i++) {
1587 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1588 np->rx_ring.orig[i].flaglen = 0;
1589 np->rx_ring.orig[i].buf = 0;
1591 np->rx_ring.ex[i].flaglen = 0;
1592 np->rx_ring.ex[i].txvlan = 0;
1593 np->rx_ring.ex[i].bufhigh = 0;
1594 np->rx_ring.ex[i].buflow = 0;
1596 np->rx_skb[i].skb = NULL;
1597 np->rx_skb[i].dma = 0;
1601 static void nv_init_tx(struct net_device *dev)
1603 struct fe_priv *np = netdev_priv(dev);
1605 np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
1606 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1607 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
1609 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
1610 np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
1611 np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
1613 for (i = 0; i < np->tx_ring_size; i++) {
1614 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1615 np->tx_ring.orig[i].flaglen = 0;
1616 np->tx_ring.orig[i].buf = 0;
1618 np->tx_ring.ex[i].flaglen = 0;
1619 np->tx_ring.ex[i].txvlan = 0;
1620 np->tx_ring.ex[i].bufhigh = 0;
1621 np->tx_ring.ex[i].buflow = 0;
1623 np->tx_skb[i].skb = NULL;
1624 np->tx_skb[i].dma = 0;
1628 static int nv_init_ring(struct net_device *dev)
1630 struct fe_priv *np = netdev_priv(dev);
1634 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1635 return nv_alloc_rx(dev);
1637 return nv_alloc_rx_optimized(dev);
1640 static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb)
1642 struct fe_priv *np = netdev_priv(dev);
1645 pci_unmap_page(np->pci_dev, tx_skb->dma,
1651 dev_kfree_skb_any(tx_skb->skb);
1659 static void nv_drain_tx(struct net_device *dev)
1661 struct fe_priv *np = netdev_priv(dev);
1664 for (i = 0; i < np->tx_ring_size; i++) {
1665 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1666 np->tx_ring.orig[i].flaglen = 0;
1667 np->tx_ring.orig[i].buf = 0;
1669 np->tx_ring.ex[i].flaglen = 0;
1670 np->tx_ring.ex[i].txvlan = 0;
1671 np->tx_ring.ex[i].bufhigh = 0;
1672 np->tx_ring.ex[i].buflow = 0;
1674 if (nv_release_txskb(dev, &np->tx_skb[i]))
1675 np->stats.tx_dropped++;
1679 static void nv_drain_rx(struct net_device *dev)
1681 struct fe_priv *np = netdev_priv(dev);
1684 for (i = 0; i < np->rx_ring_size; i++) {
1685 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1686 np->rx_ring.orig[i].flaglen = 0;
1687 np->rx_ring.orig[i].buf = 0;
1689 np->rx_ring.ex[i].flaglen = 0;
1690 np->rx_ring.ex[i].txvlan = 0;
1691 np->rx_ring.ex[i].bufhigh = 0;
1692 np->rx_ring.ex[i].buflow = 0;
1695 if (np->rx_skb[i].skb) {
1696 pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
1697 (skb_end_pointer(np->rx_skb[i].skb) -
1698 np->rx_skb[i].skb->data),
1699 PCI_DMA_FROMDEVICE);
1700 dev_kfree_skb(np->rx_skb[i].skb);
1701 np->rx_skb[i].skb = NULL;
1706 static void drain_ring(struct net_device *dev)
1712 static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
1714 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
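/*
 * Example: with tx_ring_size = 256 and put_tx_ctx 10 slots ahead of
 * get_tx_ctx, the expression above yields 256 - 10 = 246 free slots;
 * put == get counts as a completely empty ring (256 free).
 */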
1718 * nv_start_xmit: dev->hard_start_xmit function
1719 * Called with netif_tx_lock held.
1721 static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1723 struct fe_priv *np = netdev_priv(dev);
1725 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
1726 unsigned int fragments = skb_shinfo(skb)->nr_frags;
1730 u32 size = skb->len-skb->data_len;
1731 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1733 struct ring_desc* put_tx;
1734 struct ring_desc* start_tx;
1735 struct ring_desc* prev_tx;
1736 struct nv_skb_map* prev_tx_ctx;
1738 /* add fragments to entries count */
1739 for (i = 0; i < fragments; i++) {
1740 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
1741 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
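/*
 * Example of the descriptor math above: a 40000 byte linear TSO skb is
 * sent in NV_TX2_TSO_MAX_SIZE (16384 byte) chunks, so it needs
 * (40000 >> 14) + 1 = 3 descriptors; fragments are split the same way.
 */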
1744 empty_slots = nv_get_empty_tx_slots(np);
1745 if (unlikely(empty_slots <= entries)) {
1746 spin_lock_irq(&np->lock);
1747 netif_stop_queue(dev);
1749 spin_unlock_irq(&np->lock);
1750 return NETDEV_TX_BUSY;
1753 start_tx = put_tx = np->put_tx.orig;
1755 /* setup the header buffer */
1758 prev_tx_ctx = np->put_tx_ctx;
1759 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1760 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
1762 np->put_tx_ctx->dma_len = bcnt;
1763 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
1764 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1766 tx_flags = np->tx_flags;
1769 if (unlikely(put_tx++ == np->last_tx.orig))
1770 put_tx = np->first_tx.orig;
1771 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1772 np->put_tx_ctx = np->first_tx_ctx;
1775 /* setup the fragments */
1776 for (i = 0; i < fragments; i++) {
1777 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1778 u32 size = frag->size;
1783 prev_tx_ctx = np->put_tx_ctx;
1784 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1785 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
1787 np->put_tx_ctx->dma_len = bcnt;
1788 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
1789 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1793 if (unlikely(put_tx++ == np->last_tx.orig))
1794 put_tx = np->first_tx.orig;
1795 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1796 np->put_tx_ctx = np->first_tx_ctx;
1800 /* set last fragment flag */
1801 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
1803 /* save skb in this slot's context area */
1804 prev_tx_ctx->skb = skb;
1806 if (skb_is_gso(skb))
1807 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
1809 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
1810 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
1812 spin_lock_irq(&np->lock);
1815 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
1816 np->put_tx.orig = put_tx;
1818 spin_unlock_irq(&np->lock);
1820 dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
1821 dev->name, entries, tx_flags_extra);
1824 for (j=0; j<64; j++) {
1826 dprintk("\n%03x:", j);
1827 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
1832 dev->trans_start = jiffies;
1833 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
1834 return NETDEV_TX_OK;
1837 static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
1839 struct fe_priv *np = netdev_priv(dev);
1842 unsigned int fragments = skb_shinfo(skb)->nr_frags;
1846 u32 size = skb->len-skb->data_len;
1847 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1849 struct ring_desc_ex* put_tx;
1850 struct ring_desc_ex* start_tx;
1851 struct ring_desc_ex* prev_tx;
1852 struct nv_skb_map* prev_tx_ctx;
1854 /* add fragments to entries count */
1855 for (i = 0; i < fragments; i++) {
1856 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
1857 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1860 empty_slots = nv_get_empty_tx_slots(np);
1861 if (unlikely(empty_slots <= entries)) {
1862 spin_lock_irq(&np->lock);
1863 netif_stop_queue(dev);
1865 spin_unlock_irq(&np->lock);
1866 return NETDEV_TX_BUSY;
1869 start_tx = put_tx = np->put_tx.ex;
1871 /* setup the header buffer */
1874 prev_tx_ctx = np->put_tx_ctx;
1875 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1876 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
1878 np->put_tx_ctx->dma_len = bcnt;
1879 put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
1880 put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
1881 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1883 tx_flags = NV_TX2_VALID;
1886 if (unlikely(put_tx++ == np->last_tx.ex))
1887 put_tx = np->first_tx.ex;
1888 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1889 np->put_tx_ctx = np->first_tx_ctx;
1892 /* setup the fragments */
1893 for (i = 0; i < fragments; i++) {
1894 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1895 u32 size = frag->size;
1900 prev_tx_ctx = np->put_tx_ctx;
1901 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1902 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
1904 np->put_tx_ctx->dma_len = bcnt;
1905 put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
1906 put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
1907 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1911 if (unlikely(put_tx++ == np->last_tx.ex))
1912 put_tx = np->first_tx.ex;
1913 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1914 np->put_tx_ctx = np->first_tx_ctx;
1918 /* set last fragment flag */
1919 prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
1921 /* save skb in this slot's context area */
1922 prev_tx_ctx->skb = skb;
1924 if (skb_is_gso(skb))
1925 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
1927 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
1928 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
1931 if (likely(!np->vlangrp)) {
1932 start_tx->txvlan = 0;
1934 if (vlan_tx_tag_present(skb))
1935 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
1937 start_tx->txvlan = 0;
1940 spin_lock_irq(&np->lock);
1943 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
1944 np->put_tx.ex = put_tx;
1946 spin_unlock_irq(&np->lock);
1948 dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
1949 dev->name, entries, tx_flags_extra);
1952 for (j=0; j<64; j++) {
1954 dprintk("\n%03x:", j);
1955 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
1960 dev->trans_start = jiffies;
1961 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
1962 return NETDEV_TX_OK;
1966 * nv_tx_done: check for completed packets, release the skbs.
1968 * Caller must own np->lock.
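 * Descriptors between the consumer (get_tx) and producer (put_tx) pointers
 * belong to the driver once the nic clears NV_TX_VALID; the loop below frees
 * each completed skb, accounts errors, and advances get_tx, wrapping from
 * last_tx back to first_tx.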
1970 static void nv_tx_done(struct net_device *dev)
1972 struct fe_priv *np = netdev_priv(dev);
1974 struct ring_desc* orig_get_tx = np->get_tx.orig;
1976 while ((np->get_tx.orig != np->put_tx.orig) &&
1977 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) {
1979 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
1982 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
1983 np->get_tx_ctx->dma_len,
1985 np->get_tx_ctx->dma = 0;
1987 if (np->desc_ver == DESC_VER_1) {
1988 if (flags & NV_TX_LASTPACKET) {
1989 if (flags & NV_TX_ERROR) {
1990 if (flags & NV_TX_UNDERFLOW)
1991 np->stats.tx_fifo_errors++;
1992 if (flags & NV_TX_CARRIERLOST)
1993 np->stats.tx_carrier_errors++;
1994 np->stats.tx_errors++;
1996 np->stats.tx_packets++;
1997 np->stats.tx_bytes += np->get_tx_ctx->skb->len;
1999 dev_kfree_skb_any(np->get_tx_ctx->skb);
2000 np->get_tx_ctx->skb = NULL;
2003 if (flags & NV_TX2_LASTPACKET) {
2004 if (flags & NV_TX2_ERROR) {
2005 if (flags & NV_TX2_UNDERFLOW)
2006 np->stats.tx_fifo_errors++;
2007 if (flags & NV_TX2_CARRIERLOST)
2008 np->stats.tx_carrier_errors++;
2009 np->stats.tx_errors++;
2011 np->stats.tx_packets++;
2012 np->stats.tx_bytes += np->get_tx_ctx->skb->len;
2014 dev_kfree_skb_any(np->get_tx_ctx->skb);
2015 np->get_tx_ctx->skb = NULL;
2018 if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
2019 np->get_tx.orig = np->first_tx.orig;
2020 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2021 np->get_tx_ctx = np->first_tx_ctx;
2023 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
2025 netif_wake_queue(dev);
2029 static void nv_tx_done_optimized(struct net_device *dev, int limit)
2031 struct fe_priv *np = netdev_priv(dev);
2033 struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
2035 while ((np->get_tx.ex != np->put_tx.ex) &&
2036 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
2039 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
2042 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
2043 np->get_tx_ctx->dma_len,
2045 np->get_tx_ctx->dma = 0;
2047 if (flags & NV_TX2_LASTPACKET) {
2048 if (!(flags & NV_TX2_ERROR))
2049 np->stats.tx_packets++;
2050 dev_kfree_skb_any(np->get_tx_ctx->skb);
2051 np->get_tx_ctx->skb = NULL;
2053 if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2054 np->get_tx.ex = np->first_tx.ex;
2055 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2056 np->get_tx_ctx = np->first_tx_ctx;
2058 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
2060 netif_wake_queue(dev);
2065 * nv_tx_timeout: dev->tx_timeout function
2066 * Called with netif_tx_lock held.
2068 static void nv_tx_timeout(struct net_device *dev)
2070 struct fe_priv *np = netdev_priv(dev);
2071 u8 __iomem *base = get_hwbase(dev);
2074 if (np->msi_flags & NV_MSI_X_ENABLED)
2075 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2077 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2079 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);
2084 printk(KERN_INFO "%s: Ring at %lx\n",
2085 dev->name, (unsigned long)np->ring_addr);
2086 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
2087 for (i = 0; i <= np->register_size; i += 32) {
2088 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
2090 readl(base + i + 0), readl(base + i + 4),
2091 readl(base + i + 8), readl(base + i + 12),
2092 readl(base + i + 16), readl(base + i + 20),
2093 readl(base + i + 24), readl(base + i + 28));
2095 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
2096 for (i = 0; i < np->tx_ring_size; i += 4) {
2097 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
2098 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
2100 le32_to_cpu(np->tx_ring.orig[i].buf),
2101 le32_to_cpu(np->tx_ring.orig[i].flaglen),
2102 le32_to_cpu(np->tx_ring.orig[i+1].buf),
2103 le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
2104 le32_to_cpu(np->tx_ring.orig[i+2].buf),
2105 le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
2106 le32_to_cpu(np->tx_ring.orig[i+3].buf),
2107 le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
2109 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
2111 le32_to_cpu(np->tx_ring.ex[i].bufhigh),
2112 le32_to_cpu(np->tx_ring.ex[i].buflow),
2113 le32_to_cpu(np->tx_ring.ex[i].flaglen),
2114 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
2115 le32_to_cpu(np->tx_ring.ex[i+1].buflow),
2116 le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
2117 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
2118 le32_to_cpu(np->tx_ring.ex[i+2].buflow),
2119 le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
2120 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
2121 le32_to_cpu(np->tx_ring.ex[i+3].buflow),
2122 le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2127 spin_lock_irq(&np->lock);
2129 /* 1) stop tx engine */
2132 /* 2) check that the packets were not sent already: */
2133 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2136 nv_tx_done_optimized(dev, np->tx_ring_size);
2138 /* 3) if there are dead entries: clear everything */
2139 if (np->get_tx_ctx != np->put_tx_ctx) {
2140 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
2143 setup_hw_rings(dev, NV_SETUP_TX_RING);
2146 netif_wake_queue(dev);
2148 /* 4) restart tx engine */
2150 spin_unlock_irq(&np->lock);
2154 * Called when the nic notices a mismatch between the actual data len on the
2155 * wire and the len indicated in the 802 header
2157 static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2159 int hdrlen; /* length of the 802 header */
2160 int protolen; /* length as stored in the proto field */
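	/* A minimal decision table for the checks below, assuming the usual
	 * ETH_ZLEN of 60 and ETH_DATA_LEN of 1500:
	 *   datalen 100, protolen  46 -> accept, trimmed to 46
	 *   datalen 100, protolen 200 -> discard (wire shorter than claimed)
	 *   datalen  50, protolen  46 -> short packet, accepted as-is
	 *   protolen > 1500 (an EtherType, not a length) -> accept datalen
	 */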
2162 /* 1) calculate len according to header */
2163 if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
2164 protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
2167 protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
2170 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
2171 dev->name, datalen, protolen, hdrlen);
2172 if (protolen > ETH_DATA_LEN)
2173 return datalen; /* Value in proto field not a len, no checks possible */
2176 /* consistency checks: */
2177 if (datalen > ETH_ZLEN) {
2178 if (datalen >= protolen) {
2179 /* more data on wire than in 802 header, trim off the additional data */
2182 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2183 dev->name, protolen);
2186 /* less data on wire than mentioned in header.
2187 * Discard the packet.
2189 dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
2194 /* short packet. Accept only if 802 values are also short */
2195 if (protolen > ETH_ZLEN) {
2196 dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
2200 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2201 dev->name, datalen);
2206 static int nv_rx_process(struct net_device *dev, int limit)
2208 struct fe_priv *np = netdev_priv(dev);
2210 u32 rx_processed_cnt = 0;
2211 struct sk_buff *skb;
2214 while((np->get_rx.orig != np->put_rx.orig) &&
2215 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2216 (rx_processed_cnt++ < limit)) {
2218 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
2222 * the packet is for us - immediately tear down the pci mapping.
2223 * TODO: check if a prefetch of the first cacheline improves the performance.
2226 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2227 np->get_rx_ctx->dma_len,
2228 PCI_DMA_FROMDEVICE);
2229 skb = np->get_rx_ctx->skb;
2230 np->get_rx_ctx->skb = NULL;
2234 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
2235 for (j=0; j<64; j++) {
2237 dprintk("\n%03x:", j);
2238 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2242 /* look at what we actually got: */
2243 if (np->desc_ver == DESC_VER_1) {
2244 if (likely(flags & NV_RX_DESCRIPTORVALID)) {
2245 len = flags & LEN_MASK_V1;
2246 if (unlikely(flags & NV_RX_ERROR)) {
2247 if (flags & NV_RX_ERROR4) {
2248 len = nv_getlen(dev, skb->data, len);
2250 np->stats.rx_errors++;
2255 /* framing errors are soft errors */
2256 else if (flags & NV_RX_FRAMINGERR) {
2257 if (flags & NV_RX_SUBSTRACT1) {
2261 /* the rest are hard errors */
2263 if (flags & NV_RX_MISSEDFRAME)
2264 np->stats.rx_missed_errors++;
2265 if (flags & NV_RX_CRCERR)
2266 np->stats.rx_crc_errors++;
2267 if (flags & NV_RX_OVERFLOW)
2268 np->stats.rx_over_errors++;
2269 np->stats.rx_errors++;
2279 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2280 len = flags & LEN_MASK_V2;
2281 if (unlikely(flags & NV_RX2_ERROR)) {
2282 if (flags & NV_RX2_ERROR4) {
2283 len = nv_getlen(dev, skb->data, len);
2285 np->stats.rx_errors++;
2290 /* framing errors are soft errors */
2291 else if (flags & NV_RX2_FRAMINGERR) {
2292 if (flags & NV_RX2_SUBSTRACT1) {
2296 /* the rest are hard errors */
2298 if (flags & NV_RX2_CRCERR)
2299 np->stats.rx_crc_errors++;
2300 if (flags & NV_RX2_OVERFLOW)
2301 np->stats.rx_over_errors++;
2302 np->stats.rx_errors++;
2307 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2) /* ip and tcp */ {
2308 skb->ip_summed = CHECKSUM_UNNECESSARY;
2310 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
2311 (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
2312 skb->ip_summed = CHECKSUM_UNNECESSARY;
2320 /* got a valid packet - forward it to the network core */
2322 skb->protocol = eth_type_trans(skb, dev);
2323 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
2324 dev->name, len, skb->protocol);
2325 #ifdef CONFIG_FORCEDETH_NAPI
2326 netif_receive_skb(skb);
2330 dev->last_rx = jiffies;
2331 np->stats.rx_packets++;
2332 np->stats.rx_bytes += len;
2334 if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2335 np->get_rx.orig = np->first_rx.orig;
2336 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2337 np->get_rx_ctx = np->first_rx_ctx;
2340 return rx_processed_cnt;
2343 static int nv_rx_process_optimized(struct net_device *dev, int limit)
2345 struct fe_priv *np = netdev_priv(dev);
2348 u32 rx_processed_cnt = 0;
2349 struct sk_buff *skb;
2352 while((np->get_rx.ex != np->put_rx.ex) &&
2353 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
2354 (rx_processed_cnt++ < limit)) {
2356 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
2360 * the packet is for us - immediately tear down the pci mapping.
2361 * TODO: check if a prefetch of the first cacheline improves the performance.
2364 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2365 np->get_rx_ctx->dma_len,
2366 PCI_DMA_FROMDEVICE);
2367 skb = np->get_rx_ctx->skb;
2368 np->get_rx_ctx->skb = NULL;
2372 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
2373 for (j=0; j<64; j++) {
2375 dprintk("\n%03x:", j);
2376 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2380 /* look at what we actually got: */
2381 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2382 len = flags & LEN_MASK_V2;
2383 if (unlikely(flags & NV_RX2_ERROR)) {
2384 if (flags & NV_RX2_ERROR4) {
2385 len = nv_getlen(dev, skb->data, len);
2391 /* framing errors are soft errors */
2392 else if (flags & NV_RX2_FRAMINGERR) {
2393 if (flags & NV_RX2_SUBSTRACT1) {
2397 /* the rest are hard errors */
2404 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2) /* ip and tcp */ {
2405 skb->ip_summed = CHECKSUM_UNNECESSARY;
2407 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
2408 (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
2409 skb->ip_summed = CHECKSUM_UNNECESSARY;
2413 /* got a valid packet - forward it to the network core */
2415 skb->protocol = eth_type_trans(skb, dev);
2416 prefetch(skb->data);
2418 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
2419 dev->name, len, skb->protocol);
2421 if (likely(!np->vlangrp)) {
2422 #ifdef CONFIG_FORCEDETH_NAPI
2423 netif_receive_skb(skb);
2428 vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
2429 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
2430 #ifdef CONFIG_FORCEDETH_NAPI
2431 vlan_hwaccel_receive_skb(skb, np->vlangrp,
2432 vlanflags & NV_RX3_VLAN_TAG_MASK);
2434 vlan_hwaccel_rx(skb, np->vlangrp,
2435 vlanflags & NV_RX3_VLAN_TAG_MASK);
2438 #ifdef CONFIG_FORCEDETH_NAPI
2439 netif_receive_skb(skb);
2446 dev->last_rx = jiffies;
2447 np->stats.rx_packets++;
2448 np->stats.rx_bytes += len;
2453 if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
2454 np->get_rx.ex = np->first_rx.ex;
2455 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2456 np->get_rx_ctx = np->first_rx_ctx;
2459 return rx_processed_cnt;
2462 static void set_bufsize(struct net_device *dev)
2464 struct fe_priv *np = netdev_priv(dev);
2466 if (dev->mtu <= ETH_DATA_LEN)
2467 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
2469 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
2473 * nv_change_mtu: dev->change_mtu function
2474 * Called with dev_base_lock held for read.
2476 static int nv_change_mtu(struct net_device *dev, int new_mtu)
2478 struct fe_priv *np = netdev_priv(dev);
2481 if (new_mtu < 64 || new_mtu > np->pkt_limit)
2487 /* return early if the buffer sizes will not change */
2488 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
2490 if (old_mtu == new_mtu)
2493 /* synchronized against open : rtnl_lock() held by caller */
2494 if (netif_running(dev)) {
2495 u8 __iomem *base = get_hwbase(dev);
2497 * It seems that the nic preloads valid ring entries into an
2498 * internal buffer. The procedure for flushing everything is
2499 * guessed; there is probably a simpler approach.
2500 * Changing the MTU is a rare event; it shouldn't matter.
2502 nv_disable_irq(dev);
2503 netif_tx_lock_bh(dev);
2504 spin_lock(&np->lock);
2509 /* drain rx queue */
2512 /* reinit driver view of the rx queue */
2514 if (nv_init_ring(dev)) {
2515 if (!np->in_shutdown)
2516 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2518 /* reinit nic view of the rx queue */
2519 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
2520 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
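	/* NvRegRingSizes packs both ring lengths into one register as
	 * "descriptor count - 1" fields; e.g. 128 rx and 256 tx descriptors
	 * encode as (127 << NVREG_RINGSZ_RXSHIFT) + (255 << NVREG_RINGSZ_TXSHIFT).
	 */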
2521 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
2522 base + NvRegRingSizes);
2524 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2527 /* restart rx engine */
2530 spin_unlock(&np->lock);
2531 netif_tx_unlock_bh(dev);
2537 static void nv_copy_mac_to_hw(struct net_device *dev)
2539 u8 __iomem *base = get_hwbase(dev);
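	/* The MAC address is packed little-endian into two registers; for
	 * 00:11:22:33:44:55 this yields mac[0] = 0x33221100 and
	 * mac[1] = 0x5544.
	 */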
2542 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
2543 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
2544 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
2546 writel(mac[0], base + NvRegMacAddrA);
2547 writel(mac[1], base + NvRegMacAddrB);
2551 * nv_set_mac_address: dev->set_mac_address function
2552 * Called with rtnl_lock() held.
2554 static int nv_set_mac_address(struct net_device *dev, void *addr)
2556 struct fe_priv *np = netdev_priv(dev);
2557 struct sockaddr *macaddr = (struct sockaddr*)addr;
2559 if (!is_valid_ether_addr(macaddr->sa_data))
2560 return -EADDRNOTAVAIL;
2562 /* synchronized against open : rtnl_lock() held by caller */
2563 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
2565 if (netif_running(dev)) {
2566 netif_tx_lock_bh(dev);
2567 spin_lock_irq(&np->lock);
2569 /* stop rx engine */
2572 /* set mac address */
2573 nv_copy_mac_to_hw(dev);
2575 /* restart rx engine */
2577 spin_unlock_irq(&np->lock);
2578 netif_tx_unlock_bh(dev);
2580 nv_copy_mac_to_hw(dev);
2586 * nv_set_multicast: dev->set_multicast function
2587 * Called with netif_tx_lock held.
2589 static void nv_set_multicast(struct net_device *dev)
2591 struct fe_priv *np = netdev_priv(dev);
2592 u8 __iomem *base = get_hwbase(dev);
2595 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
2597 memset(addr, 0, sizeof(addr));
2598 memset(mask, 0, sizeof(mask));
2600 if (dev->flags & IFF_PROMISC) {
2601 pff |= NVREG_PFF_PROMISC;
2603 pff |= NVREG_PFF_MYADDR;
2605 if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
2609 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
2610 if (dev->flags & IFF_ALLMULTI) {
2611 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
2613 struct dev_mc_list *walk;
2615 walk = dev->mc_list;
2616 while (walk != NULL) {
2618 a = le32_to_cpu(*(u32 *) walk->dmi_addr);
2619 b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4]));
2627 addr[0] = alwaysOn[0];
2628 addr[1] = alwaysOn[1];
2629 mask[0] = alwaysOn[0] | alwaysOff[0];
2630 mask[1] = alwaysOn[1] | alwaysOff[1];
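	/* alwaysOn has accumulated the bits set in every list entry and
	 * alwaysOff the bits clear in every entry, so mask keeps only the bit
	 * positions that agree across the whole list; presumably the hardware
	 * then accepts packets whose destination matches addr on the masked
	 * bits and lets the differing bits be anything.
	 */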
2633 addr[0] |= NVREG_MCASTADDRA_FORCE;
2634 pff |= NVREG_PFF_ALWAYS;
2635 spin_lock_irq(&np->lock);
2637 writel(addr[0], base + NvRegMulticastAddrA);
2638 writel(addr[1], base + NvRegMulticastAddrB);
2639 writel(mask[0], base + NvRegMulticastMaskA);
2640 writel(mask[1], base + NvRegMulticastMaskB);
2641 writel(pff, base + NvRegPacketFilterFlags);
2642 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
2645 spin_unlock_irq(&np->lock);
2648 static void nv_update_pause(struct net_device *dev, u32 pause_flags)
2650 struct fe_priv *np = netdev_priv(dev);
2651 u8 __iomem *base = get_hwbase(dev);
2653 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
2655 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
2656 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
2657 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
2658 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
2659 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2661 writel(pff, base + NvRegPacketFilterFlags);
2664 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
2665 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
2666 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
2667 writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame);
2668 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
2669 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2671 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
2672 writel(regmisc, base + NvRegMisc1);
2678 * nv_update_linkspeed: Setup the MAC according to the link partner
2679 * @dev: Network device to be configured
2681 * The function queries the PHY and checks if there is a link partner.
2682 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
2683 * set to 10 MBit HD.
2685 * The function returns 0 if there is no link partner and 1 if there is
2686 * a good link partner.
2688 static int nv_update_linkspeed(struct net_device *dev)
2690 struct fe_priv *np = netdev_priv(dev);
2691 u8 __iomem *base = get_hwbase(dev);
2694 int adv_lpa, adv_pause, lpa_pause;
2695 int newls = np->linkspeed;
2696 int newdup = np->duplex;
2699 u32 control_1000, status_1000, phyreg, pause_flags, txreg;
2701 /* BMSR_LSTATUS is latched, read it twice:
2702 * we want the current value.
2704 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
2705 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
2707 if (!(mii_status & BMSR_LSTATUS)) {
2708 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
2710 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2716 if (np->autoneg == 0) {
2717 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
2718 dev->name, np->fixed_mode);
2719 if (np->fixed_mode & LPA_100FULL) {
2720 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2722 } else if (np->fixed_mode & LPA_100HALF) {
2723 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2725 } else if (np->fixed_mode & LPA_10FULL) {
2726 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2729 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2735 /* check auto negotiation is complete */
2736 if (!(mii_status & BMSR_ANEGCOMPLETE)) {
2737 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */
2738 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2741 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
2745 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
2746 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
2747 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
2748 dev->name, adv, lpa);
2751 if (np->gigabit == PHY_GIGABIT) {
2752 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
2753 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
2755 if ((control_1000 & ADVERTISE_1000FULL) &&
2756 (status_1000 & LPA_1000FULL)) {
2757 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
2759 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
2765 /* FIXME: handle parallel detection properly */
2766 adv_lpa = lpa & adv;
2767 if (adv_lpa & LPA_100FULL) {
2768 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2770 } else if (adv_lpa & LPA_100HALF) {
2771 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
2773 } else if (adv_lpa & LPA_10FULL) {
2774 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2776 } else if (adv_lpa & LPA_10HALF) {
2777 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2780 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
2781 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
2786 if (np->duplex == newdup && np->linkspeed == newls)
2789 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
2790 dev->name, np->linkspeed, np->duplex, newls, newdup);
2792 np->duplex = newdup;
2793 np->linkspeed = newls;
2795 if (np->gigabit == PHY_GIGABIT) {
2796 phyreg = readl(base + NvRegRandomSeed);
2797 phyreg &= ~(0x3FF00);
2798 if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
2799 phyreg |= NVREG_RNDSEED_FORCE3;
2800 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
2801 phyreg |= NVREG_RNDSEED_FORCE2;
2802 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
2803 phyreg |= NVREG_RNDSEED_FORCE;
2804 writel(phyreg, base + NvRegRandomSeed);
2807 phyreg = readl(base + NvRegPhyInterface);
2808 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
2809 if (np->duplex == 0)
2811 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
2813 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
2815 writel(phyreg, base + NvRegPhyInterface);
2817 if (phyreg & PHY_RGMII) {
2818 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
2819 txreg = NVREG_TX_DEFERRAL_RGMII_1000;
2821 txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
2823 txreg = NVREG_TX_DEFERRAL_DEFAULT;
2825 writel(txreg, base + NvRegTxDeferral);
2827 if (np->desc_ver == DESC_VER_1) {
2828 txreg = NVREG_TX_WM_DESC1_DEFAULT;
2830 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
2831 txreg = NVREG_TX_WM_DESC2_3_1000;
2833 txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
2835 writel(txreg, base + NvRegTxWatermark);
2837 writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
2840 writel(np->linkspeed, base + NvRegLinkSpeed);
2844 /* setup pause frame */
2845 if (np->duplex != 0) {
2846 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
2847 adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
2848 lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
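	/* Resolve pause frame usage from the two advertisements, roughly per
	 * IEEE 802.3 annex 28B: symmetric pause on both sides enables rx
	 * pause (plus tx if requested); if we only advertise asymmetric
	 * pause, tx pause is enabled only when the partner advertises
	 * cap+asym.
	 */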
2850 switch (adv_pause) {
2851 case ADVERTISE_PAUSE_CAP:
2852 if (lpa_pause & LPA_PAUSE_CAP) {
2853 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2854 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
2855 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2858 case ADVERTISE_PAUSE_ASYM:
2859 if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
2861 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2864 case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
2865 if (lpa_pause & LPA_PAUSE_CAP)
2867 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2868 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
2869 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2871 if (lpa_pause == LPA_PAUSE_ASYM)
2873 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2878 pause_flags = np->pause_flags;
2881 nv_update_pause(dev, pause_flags);
2886 static void nv_linkchange(struct net_device *dev)
2888 if (nv_update_linkspeed(dev)) {
2889 if (!netif_carrier_ok(dev)) {
2890 netif_carrier_on(dev);
2891 printk(KERN_INFO "%s: link up.\n", dev->name);
2895 if (netif_carrier_ok(dev)) {
2896 netif_carrier_off(dev);
2897 printk(KERN_INFO "%s: link down.\n", dev->name);
2903 static void nv_link_irq(struct net_device *dev)
2905 u8 __iomem *base = get_hwbase(dev);
2908 miistat = readl(base + NvRegMIIStatus);
2909 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
2910 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
2912 if (miistat & (NVREG_MIISTAT_LINKCHANGE))
2914 dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
2917 static irqreturn_t nv_nic_irq(int foo, void *data)
2919 struct net_device *dev = (struct net_device *) data;
2920 struct fe_priv *np = netdev_priv(dev);
2921 u8 __iomem *base = get_hwbase(dev);
2925 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
2928 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
2929 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2930 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
2932 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2933 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
2935 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
2936 if (!(events & np->irqmask))
2939 spin_lock(&np->lock);
2941 spin_unlock(&np->lock);
2943 #ifdef CONFIG_FORCEDETH_NAPI
2944 if (events & NVREG_IRQ_RX_ALL) {
2945 netif_rx_schedule(dev);
2947 /* Disable further receive irqs */
2948 spin_lock(&np->lock);
2949 np->irqmask &= ~NVREG_IRQ_RX_ALL;
2951 if (np->msi_flags & NV_MSI_X_ENABLED)
2952 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
2954 writel(np->irqmask, base + NvRegIrqMask);
2955 spin_unlock(&np->lock);
2958 if (nv_rx_process(dev, dev->weight)) {
2959 if (unlikely(nv_alloc_rx(dev))) {
2960 spin_lock(&np->lock);
2961 if (!np->in_shutdown)
2962 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2963 spin_unlock(&np->lock);
2967 if (unlikely(events & NVREG_IRQ_LINK)) {
2968 spin_lock(&np->lock);
2970 spin_unlock(&np->lock);
2972 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
2973 spin_lock(&np->lock);
2975 spin_unlock(&np->lock);
2976 np->link_timeout = jiffies + LINK_TIMEOUT;
2978 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
2979 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
2982 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
2983 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
2986 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
2987 spin_lock(&np->lock);
2988 /* disable interrupts on the nic */
2989 if (!(np->msi_flags & NV_MSI_X_ENABLED))
2990 writel(0, base + NvRegIrqMask);
2992 writel(np->irqmask, base + NvRegIrqMask);
2995 if (!np->in_shutdown) {
2996 np->nic_poll_irq = np->irqmask;
2997 np->recover_error = 1;
2998 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3000 spin_unlock(&np->lock);
3003 if (unlikely(i > max_interrupt_work)) {
3004 spin_lock(&np->lock);
3005 /* disable interrupts on the nic */
3006 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3007 writel(0, base + NvRegIrqMask);
3009 writel(np->irqmask, base + NvRegIrqMask);
3012 if (!np->in_shutdown) {
3013 np->nic_poll_irq = np->irqmask;
3014 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3016 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
3017 spin_unlock(&np->lock);
3022 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
3024 return IRQ_RETVAL(i);
3027 #define TX_WORK_PER_LOOP 64
3028 #define RX_WORK_PER_LOOP 64
3030 * All _optimized functions are used to help increase performance
3031 * (reduce CPU and increase throughput). They use descriptor version 3,
3032 * compiler directives, and reduce memory accesses.
3034 static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3036 struct net_device *dev = (struct net_device *) data;
3037 struct fe_priv *np = netdev_priv(dev);
3038 u8 __iomem *base = get_hwbase(dev);
3042 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
3045 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3046 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3047 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3049 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3050 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
3052 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3053 if (!(events & np->irqmask))
3056 spin_lock(&np->lock);
3057 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3058 spin_unlock(&np->lock);
3060 #ifdef CONFIG_FORCEDETH_NAPI
3061 if (events & NVREG_IRQ_RX_ALL) {
3062 netif_rx_schedule(dev);
3064 /* Disable further receive irqs */
3065 spin_lock(&np->lock);
3066 np->irqmask &= ~NVREG_IRQ_RX_ALL;
3068 if (np->msi_flags & NV_MSI_X_ENABLED)
3069 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3071 writel(np->irqmask, base + NvRegIrqMask);
3072 spin_unlock(&np->lock);
3075 if (nv_rx_process_optimized(dev, dev->weight)) {
3076 if (unlikely(nv_alloc_rx_optimized(dev))) {
3077 spin_lock(&np->lock);
3078 if (!np->in_shutdown)
3079 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3080 spin_unlock(&np->lock);
3084 if (unlikely(events & NVREG_IRQ_LINK)) {
3085 spin_lock(&np->lock);
3087 spin_unlock(&np->lock);
3089 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3090 spin_lock(&np->lock);
3092 spin_unlock(&np->lock);
3093 np->link_timeout = jiffies + LINK_TIMEOUT;
3095 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
3096 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3099 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
3100 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3103 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
3104 spin_lock(&np->lock);
3105 /* disable interrupts on the nic */
3106 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3107 writel(0, base + NvRegIrqMask);
3109 writel(np->irqmask, base + NvRegIrqMask);
3112 if (!np->in_shutdown) {
3113 np->nic_poll_irq = np->irqmask;
3114 np->recover_error = 1;
3115 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3117 spin_unlock(&np->lock);
3121 if (unlikely(i > max_interrupt_work)) {
3122 spin_lock(&np->lock);
3123 /* disable interrupts on the nic */
3124 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3125 writel(0, base + NvRegIrqMask);
3127 writel(np->irqmask, base + NvRegIrqMask);
3130 if (!np->in_shutdown) {
3131 np->nic_poll_irq = np->irqmask;
3132 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3134 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_optimized.\n", dev->name, i);
3135 spin_unlock(&np->lock);
3140 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
3142 return IRQ_RETVAL(i);
3145 static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3147 struct net_device *dev = (struct net_device *) data;
3148 struct fe_priv *np = netdev_priv(dev);
3149 u8 __iomem *base = get_hwbase(dev);
3152 unsigned long flags;
3154 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
3157 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3158 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
3159 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
3160 if (!(events & np->irqmask))
3163 spin_lock_irqsave(&np->lock, flags);
3164 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3165 spin_unlock_irqrestore(&np->lock, flags);
3167 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
3168 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3171 if (unlikely(i > max_interrupt_work)) {
3172 spin_lock_irqsave(&np->lock, flags);
3173 /* disable interrupts on the nic */
3174 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
3177 if (!np->in_shutdown) {
3178 np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
3179 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3181 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
3182 spin_unlock_irqrestore(&np->lock, flags);
3187 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
3189 return IRQ_RETVAL(i);
3192 #ifdef CONFIG_FORCEDETH_NAPI
3193 static int nv_napi_poll(struct net_device *dev, int *budget)
3195 int pkts, limit = min(*budget, dev->quota);
3196 struct fe_priv *np = netdev_priv(dev);
3197 u8 __iomem *base = get_hwbase(dev);
3198 unsigned long flags;
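	/* Pre-2.6.24 NAPI interface: *budget is the overall packet allowance
	 * for this softirq run and dev->quota the per-device share; a poll
	 * handler may process at most min(*budget, dev->quota) packets,
	 * charges its work to both counters when rescheduling, and returns
	 * nonzero while work remains.
	 */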
3201 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
3202 pkts = nv_rx_process(dev, limit);
3203 retcode = nv_alloc_rx(dev);
3205 pkts = nv_rx_process_optimized(dev, limit);
3206 retcode = nv_alloc_rx_optimized(dev);
3210 spin_lock_irqsave(&np->lock, flags);
3211 if (!np->in_shutdown)
3212 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3213 spin_unlock_irqrestore(&np->lock, flags);
3217 /* all done, no more packets present */
3218 netif_rx_complete(dev);
3220 /* re-enable receive interrupts */
3221 spin_lock_irqsave(&np->lock, flags);
3223 np->irqmask |= NVREG_IRQ_RX_ALL;
3224 if (np->msi_flags & NV_MSI_X_ENABLED)
3225 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3227 writel(np->irqmask, base + NvRegIrqMask);
3229 spin_unlock_irqrestore(&np->lock, flags);
3232 /* used up our quantum, so reschedule */
3240 #ifdef CONFIG_FORCEDETH_NAPI
3241 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3243 struct net_device *dev = (struct net_device *) data;
3244 u8 __iomem *base = get_hwbase(dev);
3247 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3248 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3251 netif_rx_schedule(dev);
3252 /* disable receive interrupts on the nic */
3253 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3259 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3261 struct net_device *dev = (struct net_device *) data;
3262 struct fe_priv *np = netdev_priv(dev);
3263 u8 __iomem *base = get_hwbase(dev);
3266 unsigned long flags;
3268 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
3271 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3272 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3273 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
3274 if (!(events & np->irqmask))
3277 if (nv_rx_process_optimized(dev, dev->weight)) {
3278 if (unlikely(nv_alloc_rx_optimized(dev))) {
3279 spin_lock_irqsave(&np->lock, flags);
3280 if (!np->in_shutdown)
3281 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3282 spin_unlock_irqrestore(&np->lock, flags);
3286 if (unlikely(i > max_interrupt_work)) {
3287 spin_lock_irqsave(&np->lock, flags);
3288 /* disable interrupts on the nic */
3289 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3292 if (!np->in_shutdown) {
3293 np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
3294 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3296 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
3297 spin_unlock_irqrestore(&np->lock, flags);
3301 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
3303 return IRQ_RETVAL(i);
3307 static irqreturn_t nv_nic_irq_other(int foo, void *data)
3309 struct net_device *dev = (struct net_device *) data;
3310 struct fe_priv *np = netdev_priv(dev);
3311 u8 __iomem *base = get_hwbase(dev);
3314 unsigned long flags;
3316 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
3319 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3320 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
3321 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3322 if (!(events & np->irqmask))
3325 /* check tx in case we reached max loop limit in tx isr */
3326 spin_lock_irqsave(&np->lock, flags);
3327 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3328 spin_unlock_irqrestore(&np->lock, flags);
3330 if (events & NVREG_IRQ_LINK) {
3331 spin_lock_irqsave(&np->lock, flags);
3333 spin_unlock_irqrestore(&np->lock, flags);
3335 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
3336 spin_lock_irqsave(&np->lock, flags);
3338 spin_unlock_irqrestore(&np->lock, flags);
3339 np->link_timeout = jiffies + LINK_TIMEOUT;
3341 if (events & NVREG_IRQ_RECOVER_ERROR) {
3342 spin_lock_irq(&np->lock);
3343 /* disable interrupts on the nic */
3344 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3347 if (!np->in_shutdown) {
3348 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3349 np->recover_error = 1;
3350 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3352 spin_unlock_irq(&np->lock);
3355 if (events & (NVREG_IRQ_UNKNOWN)) {
3356 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3359 if (unlikely(i > max_interrupt_work)) {
3360 spin_lock_irqsave(&np->lock, flags);
3361 /* disable interrupts on the nic */
3362 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3365 if (!np->in_shutdown) {
3366 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3367 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3369 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
3370 spin_unlock_irqrestore(&np->lock, flags);
3375 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
3377 return IRQ_RETVAL(i);
3380 static irqreturn_t nv_nic_irq_test(int foo, void *data)
3382 struct net_device *dev = (struct net_device *) data;
3383 struct fe_priv *np = netdev_priv(dev);
3384 u8 __iomem *base = get_hwbase(dev);
3387 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
3389 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3390 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3391 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
3393 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3394 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3397 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3398 if (!(events & NVREG_IRQ_TIMER))
3399 return IRQ_RETVAL(0);
3401 spin_lock(&np->lock);
3403 spin_unlock(&np->lock);
3405 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
3407 return IRQ_RETVAL(1);
3410 static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3412 u8 __iomem *base = get_hwbase(dev);
3416 /* Each interrupt bit can be mapped to an MSIX vector (4 bits).
3417 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
3418 * the remaining 8 interrupts.
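 * For example, mapping vector 2 to interrupt bits 0 and 3 of the low half
 * builds msixmap = (2 << 0) | (2 << 12) = 0x00002002 before it is OR'd into
 * NvRegMSIXMap0 below.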
3420 for (i = 0; i < 8; i++) {
3421 if ((irqmask >> i) & 0x1) {
3422 msixmap |= vector << (i << 2);
3425 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
3428 for (i = 0; i < 8; i++) {
3429 if ((irqmask >> (i + 8)) & 0x1) {
3430 msixmap |= vector << (i << 2);
3433 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
3436 static int nv_request_irq(struct net_device *dev, int intr_test)
3438 struct fe_priv *np = get_nvpriv(dev);
3439 u8 __iomem *base = get_hwbase(dev);
3442 irqreturn_t (*handler)(int foo, void *data);
3445 handler = nv_nic_irq_test;
3447 if (np->desc_ver == DESC_VER_3)
3448 handler = nv_nic_irq_optimized;
3450 handler = nv_nic_irq;
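	/* Interrupt strategy, as set up below: in throughput mode the work is
	 * split across three MSI-X vectors (rx, tx, other/link); otherwise a
	 * single vector runs the combined handler, falling back from MSI-X to
	 * MSI to the legacy INTx pin if a mode cannot be enabled.
	 */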
3453 if (np->msi_flags & NV_MSI_X_CAPABLE) {
3454 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
3455 np->msi_x_entry[i].entry = i;
3457 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
3458 np->msi_flags |= NV_MSI_X_ENABLED;
3459 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3460 /* Request irq for rx handling */
3461 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) {
3462 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
3463 pci_disable_msix(np->pci_dev);
3464 np->msi_flags &= ~NV_MSI_X_ENABLED;
3467 /* Request irq for tx handling */
3468 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) {
3469 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
3470 pci_disable_msix(np->pci_dev);
3471 np->msi_flags &= ~NV_MSI_X_ENABLED;
3474 /* Request irq for link and timer handling */
3475 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) {
3476 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
3477 pci_disable_msix(np->pci_dev);
3478 np->msi_flags &= ~NV_MSI_X_ENABLED;
3481 /* map interrupts to their respective vector */
3482 writel(0, base + NvRegMSIXMap0);
3483 writel(0, base + NvRegMSIXMap1);
3484 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
3485 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
3486 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
3488 /* Request irq for all interrupts */
3489 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
3490 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3491 pci_disable_msix(np->pci_dev);
3492 np->msi_flags &= ~NV_MSI_X_ENABLED;
3496 /* map interrupts to vector 0 */
3497 writel(0, base + NvRegMSIXMap0);
3498 writel(0, base + NvRegMSIXMap1);
3502 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
3503 if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
3504 np->msi_flags |= NV_MSI_ENABLED;
3505 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
3506 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3507 pci_disable_msi(np->pci_dev);
3508 np->msi_flags &= ~NV_MSI_ENABLED;
3512 /* map interrupts to vector 0 */
3513 writel(0, base + NvRegMSIMap0);
3514 writel(0, base + NvRegMSIMap1);
3515 /* enable msi vector 0 */
3516 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3520 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
3527 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
3529 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
3534 static void nv_free_irq(struct net_device *dev)
3536 struct fe_priv *np = get_nvpriv(dev);
3539 if (np->msi_flags & NV_MSI_X_ENABLED) {
3540 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
3541 free_irq(np->msi_x_entry[i].vector, dev);
3543 pci_disable_msix(np->pci_dev);
3544 np->msi_flags &= ~NV_MSI_X_ENABLED;
3546 free_irq(np->pci_dev->irq, dev);
3547 if (np->msi_flags & NV_MSI_ENABLED) {
3548 pci_disable_msi(np->pci_dev);
3549 np->msi_flags &= ~NV_MSI_ENABLED;
3554 static void nv_do_nic_poll(unsigned long data)
3556 struct net_device *dev = (struct net_device *) data;
3557 struct fe_priv *np = netdev_priv(dev);
3558 u8 __iomem *base = get_hwbase(dev);
3562 * First disable the kernel irq line(s) and then
3563 * reenable interrupt generation on the nic; we have to do this before
3564 * calling nv_nic_irq because that may decide to do otherwise.
3567 if (!using_multi_irqs(dev)) {
3568 if (np->msi_flags & NV_MSI_X_ENABLED)
3569 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
3571 disable_irq_lockdep(dev->irq);
3574 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
3575 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
3576 mask |= NVREG_IRQ_RX_ALL;
3578 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
3579 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
3580 mask |= NVREG_IRQ_TX_ALL;
3582 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
3583 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
3584 mask |= NVREG_IRQ_OTHER;
3587 np->nic_poll_irq = 0;
3589 if (np->recover_error) {
3590 np->recover_error = 0;
3591 printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
3592 if (netif_running(dev)) {
3593 netif_tx_lock_bh(dev);
3594 spin_lock(&np->lock);
3599 /* drain rx queue */
3602 /* reinit driver view of the rx queue */
3604 if (nv_init_ring(dev)) {
3605 if (!np->in_shutdown)
3606 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3608 /* reinit nic view of the rx queue */
3609 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3610 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3611 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3612 base + NvRegRingSizes);
3614 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3617 /* restart rx engine */
3620 spin_unlock(&np->lock);
3621 netif_tx_unlock_bh(dev);
3625 /* FIXME: Do we need synchronize_irq(dev->irq) here? */
3627 writel(mask, base + NvRegIrqMask);
3630 if (!using_multi_irqs(dev)) {
3631 if (np->desc_ver == DESC_VER_3)
3632 nv_nic_irq_optimized(0, dev);
3635 if (np->msi_flags & NV_MSI_X_ENABLED)
3636 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
3638 enable_irq_lockdep(dev->irq);
3640 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
3641 nv_nic_irq_rx(0, dev);
3642 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
3644 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
3645 nv_nic_irq_tx(0, dev);
3646 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
3648 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
3649 nv_nic_irq_other(0, dev);
3650 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
3655 #ifdef CONFIG_NET_POLL_CONTROLLER
3656 static void nv_poll_controller(struct net_device *dev)
3658 nv_do_nic_poll((unsigned long) dev);
3662 static void nv_do_stats_poll(unsigned long data)
3664 struct net_device *dev = (struct net_device *) data;
3665 struct fe_priv *np = netdev_priv(dev);
3667 nv_get_hw_stats(dev);
3669 if (!np->in_shutdown)
3670 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
3673 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3675 struct fe_priv *np = netdev_priv(dev);
3676 strcpy(info->driver, "forcedeth");
3677 strcpy(info->version, FORCEDETH_VERSION);
3678 strcpy(info->bus_info, pci_name(np->pci_dev));
3681 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
3683 struct fe_priv *np = netdev_priv(dev);
3684 wolinfo->supported = WAKE_MAGIC;
3686 spin_lock_irq(&np->lock);
3688 wolinfo->wolopts = WAKE_MAGIC;
3689 spin_unlock_irq(&np->lock);
3692 static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
3694 struct fe_priv *np = netdev_priv(dev);
3695 u8 __iomem *base = get_hwbase(dev);
3698 if (wolinfo->wolopts == 0) {
3700 } else if (wolinfo->wolopts & WAKE_MAGIC) {
3702 flags = NVREG_WAKEUPFLAGS_ENABLE;
3704 if (netif_running(dev)) {
3705 spin_lock_irq(&np->lock);
3706 writel(flags, base + NvRegWakeUpFlags);
3707 spin_unlock_irq(&np->lock);
3712 static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3714 struct fe_priv *np = netdev_priv(dev);
3717 spin_lock_irq(&np->lock);
3718 ecmd->port = PORT_MII;
3719 if (!netif_running(dev)) {
3720 /* We do not track link speed / duplex setting if the
3721 * interface is disabled. Force a link check */
3722 if (nv_update_linkspeed(dev)) {
3723 if (!netif_carrier_ok(dev))
3724 netif_carrier_on(dev);
3726 if (netif_carrier_ok(dev))
3727 netif_carrier_off(dev);
3731 if (netif_carrier_ok(dev)) {
3732 switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
3733 case NVREG_LINKSPEED_10:
3734 ecmd->speed = SPEED_10;
3736 case NVREG_LINKSPEED_100:
3737 ecmd->speed = SPEED_100;
3739 case NVREG_LINKSPEED_1000:
3740 ecmd->speed = SPEED_1000;
3743 ecmd->duplex = DUPLEX_HALF;
3745 ecmd->duplex = DUPLEX_FULL;
3751 ecmd->autoneg = np->autoneg;
3753 ecmd->advertising = ADVERTISED_MII;
3755 ecmd->advertising |= ADVERTISED_Autoneg;
3756 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3757 if (adv & ADVERTISE_10HALF)
3758 ecmd->advertising |= ADVERTISED_10baseT_Half;
3759 if (adv & ADVERTISE_10FULL)
3760 ecmd->advertising |= ADVERTISED_10baseT_Full;
3761 if (adv & ADVERTISE_100HALF)
3762 ecmd->advertising |= ADVERTISED_100baseT_Half;
3763 if (adv & ADVERTISE_100FULL)
3764 ecmd->advertising |= ADVERTISED_100baseT_Full;
3765 if (np->gigabit == PHY_GIGABIT) {
3766 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3767 if (adv & ADVERTISE_1000FULL)
3768 ecmd->advertising |= ADVERTISED_1000baseT_Full;
3771 ecmd->supported = (SUPPORTED_Autoneg |
3772 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
3773 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
3775 if (np->gigabit == PHY_GIGABIT)
3776 ecmd->supported |= SUPPORTED_1000baseT_Full;
3778 ecmd->phy_address = np->phyaddr;
3779 ecmd->transceiver = XCVR_EXTERNAL;
3781 /* ignore maxtxpkt, maxrxpkt for now */
3782 spin_unlock_irq(&np->lock);
3786 static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3788 struct fe_priv *np = netdev_priv(dev);
3790 if (ecmd->port != PORT_MII)
3792 if (ecmd->transceiver != XCVR_EXTERNAL)
3794 if (ecmd->phy_address != np->phyaddr) {
3795 /* TODO: support switching between multiple phys. Should be
3796 * trivial, but not enabled due to lack of test hardware. */
3799 if (ecmd->autoneg == AUTONEG_ENABLE) {
3802 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
3803 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
3804 if (np->gigabit == PHY_GIGABIT)
3805 mask |= ADVERTISED_1000baseT_Full;
3807 if ((ecmd->advertising & mask) == 0)
3810 } else if (ecmd->autoneg == AUTONEG_DISABLE) {
3811 /* Note: autonegotiation disable, speed 1000 intentionally
3812 * forbidden - no one should need that. */
3814 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
3816 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
3822 netif_carrier_off(dev);
3823 if (netif_running(dev)) {
3824 nv_disable_irq(dev);
3825 netif_tx_lock_bh(dev);
3826 spin_lock(&np->lock);
3830 spin_unlock(&np->lock);
3831 netif_tx_unlock_bh(dev);
3834 if (ecmd->autoneg == AUTONEG_ENABLE) {
3839 /* advertise only what has been requested */
3840 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3841 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3842 if (ecmd->advertising & ADVERTISED_10baseT_Half)
3843 adv |= ADVERTISE_10HALF;
3844 if (ecmd->advertising & ADVERTISED_10baseT_Full)
3845 adv |= ADVERTISE_10FULL;
3846 if (ecmd->advertising & ADVERTISED_100baseT_Half)
3847 adv |= ADVERTISE_100HALF;
3848 if (ecmd->advertising & ADVERTISED_100baseT_Full)
3849 adv |= ADVERTISE_100FULL;
3850 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
3851 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
3852 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3853 adv |= ADVERTISE_PAUSE_ASYM;
3854 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
3856 if (np->gigabit == PHY_GIGABIT) {
3857 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3858 adv &= ~ADVERTISE_1000FULL;
3859 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
3860 adv |= ADVERTISE_1000FULL;
3861 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
3864 if (netif_running(dev))
3865 printk(KERN_INFO "%s: link down.\n", dev->name);
3866 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3867 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
3868 bmcr |= BMCR_ANENABLE;
3869 /* reset the phy in order for settings to stick,
3870 * and cause autoneg to start */
3871 if (phy_reset(dev, bmcr)) {
3872 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
3876 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
3877 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
3884 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3885 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3886 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
3887 adv |= ADVERTISE_10HALF;
3888 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
3889 adv |= ADVERTISE_10FULL;
3890 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
3891 adv |= ADVERTISE_100HALF;
3892 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
3893 adv |= ADVERTISE_100FULL;
3894 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
3895 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
3896 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
3897 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3899 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
3900 adv |= ADVERTISE_PAUSE_ASYM;
3901 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3903 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
3904 np->fixed_mode = adv;
3906 if (np->gigabit == PHY_GIGABIT) {
3907 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3908 adv &= ~ADVERTISE_1000FULL;
3909 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
3912 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3913 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
3914 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
3915 bmcr |= BMCR_FULLDPLX;
3916 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
3917 bmcr |= BMCR_SPEED100;
3918 if (np->phy_oui == PHY_OUI_MARVELL) {
3919 /* reset the phy in order for forced mode settings to stick */
3920 if (phy_reset(dev, bmcr)) {
3921 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
3925 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
3926 if (netif_running(dev)) {
3927 /* Wait a bit and then reconfigure the nic. */
3934 if (netif_running(dev)) {
3943 #define FORCEDETH_REGS_VER 1
3945 static int nv_get_regs_len(struct net_device *dev)
3947 struct fe_priv *np = netdev_priv(dev);
3948 return np->register_size;
3951 static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
3953 struct fe_priv *np = netdev_priv(dev);
3954 u8 __iomem *base = get_hwbase(dev);
3958 regs->version = FORCEDETH_REGS_VER;
3959 spin_lock_irq(&np->lock);
3960 for (i = 0; i < np->register_size/sizeof(u32); i++) /* '<' keeps the dump within the nv_get_regs_len() buffer */
3961 rbuf[i] = readl(base + i*sizeof(u32));
3962 spin_unlock_irq(&np->lock);
3965 static int nv_nway_reset(struct net_device *dev)
3967 struct fe_priv *np = netdev_priv(dev);
3973 netif_carrier_off(dev);
3974 if (netif_running(dev)) {
3975 nv_disable_irq(dev);
3976 netif_tx_lock_bh(dev);
3977 spin_lock(&np->lock);
3981 spin_unlock(&np->lock);
3982 netif_tx_unlock_bh(dev);
3983 printk(KERN_INFO "%s: link down.\n", dev->name);
3986 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3987 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
3988 bmcr |= BMCR_ANENABLE;
3989 /* reset the phy in order for settings to stick */
3990 if (phy_reset(dev, bmcr)) {
3991 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
3995 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
3996 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
3999 if (netif_running(dev)) {
4012 static int nv_set_tso(struct net_device *dev, u32 value)
4014 struct fe_priv *np = netdev_priv(dev);
4016 if ((np->driver_data & DEV_HAS_CHECKSUM))
4017 return ethtool_op_set_tso(dev, value);
4022 static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
4024 struct fe_priv *np = netdev_priv(dev);
4026 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4027 ring->rx_mini_max_pending = 0;
4028 ring->rx_jumbo_max_pending = 0;
4029 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4031 ring->rx_pending = np->rx_ring_size;
4032 ring->rx_mini_pending = 0;
4033 ring->rx_jumbo_pending = 0;
4034 ring->tx_pending = np->tx_ring_size;
4037 static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
4039 struct fe_priv *np = netdev_priv(dev);
4040 u8 __iomem *base = get_hwbase(dev);
4041 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
4042 dma_addr_t ring_addr;
4044 if (ring->rx_pending < RX_RING_MIN ||
4045 ring->tx_pending < TX_RING_MIN ||
4046 ring->rx_mini_pending != 0 ||
4047 ring->rx_jumbo_pending != 0 ||
4048 (np->desc_ver == DESC_VER_1 &&
4049 (ring->rx_pending > RING_MAX_DESC_VER_1 ||
4050 ring->tx_pending > RING_MAX_DESC_VER_1)) ||
4051 (np->desc_ver != DESC_VER_1 &&
4052 (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
4053 ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
4057 /* allocate new rings */
4058 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
4059 rxtx_ring = pci_alloc_consistent(np->pci_dev,
4060 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4063 rxtx_ring = pci_alloc_consistent(np->pci_dev,
4064 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4067 rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
4068 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
4069 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
4070 /* fall back to old rings */
4071 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
4073 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4074 rxtx_ring, ring_addr);
4077 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4078 rxtx_ring, ring_addr);
4087 if (netif_running(dev)) {
4088 nv_disable_irq(dev);
4089 netif_tx_lock_bh(dev);
4090 spin_lock(&np->lock);
4102 /* set new values */
4103 np->rx_ring_size = ring->rx_pending;
4104 np->tx_ring_size = ring->tx_pending;
4105 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
4106 np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
4107 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4109 np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
4110 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4112 np->rx_skb = (struct nv_skb_map *)rx_skbuff;
4113 np->tx_skb = (struct nv_skb_map *)tx_skbuff;
4114 np->ring_addr = ring_addr;
4116 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4117 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
4119 if (netif_running(dev)) {
4120 /* reinit driver view of the queues */
4122 if (nv_init_ring(dev)) {
4123 if (!np->in_shutdown)
4124 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4127 /* reinit nic view of the queues */
4128 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4129 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4130 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4131 base + NvRegRingSizes);
4133 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4136 /* restart engines */
4139 spin_unlock(&np->lock);
4140 netif_tx_unlock_bh(dev);
4148 static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4150 struct fe_priv *np = netdev_priv(dev);
4152 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
4153 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
4154 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
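/*
 * nv_set_pauseparam: with autoneg, pause use is negotiated by advertising
 * ADVERTISE_PAUSE_CAP/ASYM and restarting autonegotiation; without
 * autoneg the requested rx/tx pause state is applied to the hw directly.
 */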
4157 static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4159 struct fe_priv *np = netdev_priv(dev);
4160 int adv, bmcr;
4162 if ((!np->autoneg && np->duplex == 0) ||
4163 (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4164 printk(KERN_INFO "%s: cannot set pause settings when forced link is in half duplex.\n",
4168 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4169 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
4173 netif_carrier_off(dev);
4174 if (netif_running(dev)) {
4175 nv_disable_irq(dev);
4176 netif_tx_lock_bh(dev);
4177 spin_lock(&np->lock);
4181 spin_unlock(&np->lock);
4182 netif_tx_unlock_bh(dev);
4185 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
4186 if (pause->rx_pause)
4187 np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
4188 if (pause->tx_pause)
4189 np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
4191 if (np->autoneg && pause->autoneg) {
4192 np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
4194 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4195 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4196 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4197 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4198 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4199 adv |= ADVERTISE_PAUSE_ASYM;
4200 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4202 if (netif_running(dev))
4203 printk(KERN_INFO "%s: link down.\n", dev->name);
4204 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4205 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4206 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4208 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4209 if (pause->rx_pause)
4210 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4211 if (pause->tx_pause)
4212 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4214 if (!netif_running(dev))
4215 nv_update_linkspeed(dev);
4217 nv_update_pause(dev, np->pause_flags);
4220 if (netif_running(dev)) {
4228 static u32 nv_get_rx_csum(struct net_device *dev)
4230 struct fe_priv *np = netdev_priv(dev);
4231 return np->rx_csum != 0;
4234 static int nv_set_rx_csum(struct net_device *dev, u32 data)
4236 struct fe_priv *np = netdev_priv(dev);
4237 u8 __iomem *base = get_hwbase(dev);
4240 if (np->driver_data & DEV_HAS_CHECKSUM) {
4243 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4246 /* vlan is dependent on rx checksum offload */
4247 if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
4248 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
4250 if (netif_running(dev)) {
4251 spin_lock_irq(&np->lock);
4252 writel(np->txrxctl_bits, base + NvRegTxRxControl);
4253 spin_unlock_irq(&np->lock);
4262 static int nv_set_tx_csum(struct net_device *dev, u32 data)
4264 struct fe_priv *np = netdev_priv(dev);
4266 if (np->driver_data & DEV_HAS_CHECKSUM)
4267 return ethtool_op_set_tx_hw_csum(dev, data);
4272 static int nv_set_sg(struct net_device *dev, u32 data)
4274 struct fe_priv *np = netdev_priv(dev);
4276 if (np->driver_data & DEV_HAS_CHECKSUM)
4277 return ethtool_op_set_sg(dev, data);
4282 static int nv_get_stats_count(struct net_device *dev)
4284 struct fe_priv *np = netdev_priv(dev);
4286 if (np->driver_data & DEV_HAS_STATISTICS_V1)
4287 return NV_DEV_STATISTICS_V1_COUNT;
4288 else if (np->driver_data & DEV_HAS_STATISTICS_V2)
4289 return NV_DEV_STATISTICS_V2_COUNT;
4294 static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
4296 struct fe_priv *np = netdev_priv(dev);
4299 nv_do_stats_poll((unsigned long)dev);
4301 memcpy(buffer, &np->estats, nv_get_stats_count(dev)*sizeof(u64));
4304 static int nv_self_test_count(struct net_device *dev)
4306 struct fe_priv *np = netdev_priv(dev);
4308 if (np->driver_data & DEV_HAS_TEST_EXTENDED)
4309 return NV_TEST_COUNT_EXTENDED;
4311 return NV_TEST_COUNT_BASE;
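/*
 * nv_link_test: BMSR latches link-down events, so it is read twice; the
 * second read reflects the current link state.
 */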
4314 static int nv_link_test(struct net_device *dev)
4316 struct fe_priv *np = netdev_priv(dev);
4319 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4320 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4322 /* check phy link status */
4323 if (!(mii_status & BMSR_LSTATUS))
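/*
 * nv_register_test: walk the nv_registers_test table, xor each register
 * with its mask to toggle the writable bits, verify the toggled value
 * reads back, then restore the original contents.
 */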
4329 static int nv_register_test(struct net_device *dev)
4331 u8 __iomem *base = get_hwbase(dev);
4333 u32 orig_read, new_read;
4336 orig_read = readl(base + nv_registers_test[i].reg);
4338 /* xor with mask to toggle bits */
4339 orig_read ^= nv_registers_test[i].mask;
4341 writel(orig_read, base + nv_registers_test[i].reg);
4343 new_read = readl(base + nv_registers_test[i].reg);
4345 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
4348 /* restore original value */
4349 orig_read ^= nv_registers_test[i].mask;
4350 writel(orig_read, base + nv_registers_test[i].reg);
4352 } while (nv_registers_test[++i].reg != 0);
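/*
 * nv_interrupt_test: temporarily switch to a single interrupt vector,
 * program the timer irq at the CPU-mode polling rate, and wait for the
 * handler to set np->intr_test to prove that interrupt delivery works.
 */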
4357 static int nv_interrupt_test(struct net_device *dev)
4359 struct fe_priv *np = netdev_priv(dev);
4360 u8 __iomem *base = get_hwbase(dev);
4363 u32 save_msi_flags, save_poll_interval = 0;
4365 if (netif_running(dev)) {
4366 /* free current irq */
4368 save_poll_interval = readl(base+NvRegPollingInterval);
4371 /* flag to test interrupt handler */
4374 /* setup test irq */
4375 save_msi_flags = np->msi_flags;
4376 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
4377 np->msi_flags |= 0x001; /* setup 1 vector */
4378 if (nv_request_irq(dev, 1))
4381 /* setup timer interrupt */
4382 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
4383 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4385 nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4387 /* wait for at least one interrupt */
4390 spin_lock_irq(&np->lock);
4392 /* flag should be set within ISR */
4393 testcnt = np->intr_test;
4397 nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4398 if (!(np->msi_flags & NV_MSI_X_ENABLED))
4399 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4401 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4403 spin_unlock_irq(&np->lock);
4407 np->msi_flags = save_msi_flags;
4409 if (netif_running(dev)) {
4410 writel(save_poll_interval, base + NvRegPollingInterval);
4411 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4412 /* restore original irq */
4413 if (nv_request_irq(dev, 0))
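/*
 * nv_loopback_test: put the MAC into loopback (NVREG_PFF_LOOPBACK), send
 * one ETH_DATA_LEN packet filled with an incrementing byte pattern and
 * check that it comes back on the rx ring with the right length and data.
 */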
4420 static int nv_loopback_test(struct net_device *dev)
4422 struct fe_priv *np = netdev_priv(dev);
4423 u8 __iomem *base = get_hwbase(dev);
4424 struct sk_buff *tx_skb, *rx_skb;
4425 dma_addr_t test_dma_addr;
4426 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
4428 int len, i, pkt_len;
4430 u32 filter_flags = 0;
4431 u32 misc1_flags = 0;
4434 if (netif_running(dev)) {
4435 nv_disable_irq(dev);
4436 filter_flags = readl(base + NvRegPacketFilterFlags);
4437 misc1_flags = readl(base + NvRegMisc1);
4442 /* reinit driver view of the rx queue */
4446 /* setup hardware for loopback */
4447 writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
4448 writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
4450 /* reinit nic view of the rx queue */
4451 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4452 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4453 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4454 base + NvRegRingSizes);
4457 /* restart rx engine */
4461 /* setup packet for tx */
4462 pkt_len = ETH_DATA_LEN;
4463 tx_skb = dev_alloc_skb(pkt_len);
4465 printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
4466 " of %s\n", dev->name);
4470 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
4471 skb_tailroom(tx_skb),
4472 PCI_DMA_TODEVICE); /* tx source buffer: the device only reads from it */
4473 pkt_data = skb_put(tx_skb, pkt_len);
4474 for (i = 0; i < pkt_len; i++)
4475 pkt_data[i] = (u8)(i & 0xff);
4477 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
4478 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
4479 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
4481 np->tx_ring.ex[0].bufhigh = cpu_to_le64(test_dma_addr) >> 32;
4482 np->tx_ring.ex[0].buflow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
4483 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
4485 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4486 pci_push(get_hwbase(dev));
4490 /* check for rx of the packet */
4491 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
4492 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
4493 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
4496 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
4497 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
4500 if (flags & NV_RX_AVAIL) {
4502 } else if (np->desc_ver == DESC_VER_1) {
4503 if (flags & NV_RX_ERROR)
4506 if (flags & NV_RX2_ERROR) {
4512 if (len != pkt_len) {
4514 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
4515 dev->name, len, pkt_len);
4517 rx_skb = np->rx_skb[0].skb;
4518 for (i = 0; i < pkt_len; i++) {
4519 if (rx_skb->data[i] != (u8)(i & 0xff)) {
4521 dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
4528 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
4531 pci_unmap_page(np->pci_dev, test_dma_addr,
4532 (skb_end_pointer(tx_skb) - tx_skb->data),
4534 dev_kfree_skb_any(tx_skb);
4540 /* drain rx queue */
4544 if (netif_running(dev)) {
4545 writel(misc1_flags, base + NvRegMisc1);
4546 writel(filter_flags, base + NvRegPacketFilterFlags);
4553 static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
4555 struct fe_priv *np = netdev_priv(dev);
4556 u8 __iomem *base = get_hwbase(dev);
4558 memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64));
4560 if (!nv_link_test(dev)) {
4561 test->flags |= ETH_TEST_FL_FAILED;
4565 if (test->flags & ETH_TEST_FL_OFFLINE) {
4566 if (netif_running(dev)) {
4567 netif_stop_queue(dev);
4568 netif_poll_disable(dev);
4569 netif_tx_lock_bh(dev);
4570 spin_lock_irq(&np->lock);
4571 nv_disable_hw_interrupts(dev, np->irqmask);
4572 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
4573 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4575 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4581 /* drain rx queue */
4584 spin_unlock_irq(&np->lock);
4585 netif_tx_unlock_bh(dev);
4588 if (!nv_register_test(dev)) {
4589 test->flags |= ETH_TEST_FL_FAILED;
4593 result = nv_interrupt_test(dev);
4595 test->flags |= ETH_TEST_FL_FAILED;
4603 if (!nv_loopback_test(dev)) {
4604 test->flags |= ETH_TEST_FL_FAILED;
4608 if (netif_running(dev)) {
4609 /* reinit driver view of the rx queue */
4611 if (nv_init_ring(dev)) {
4612 if (!np->in_shutdown)
4613 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4615 /* reinit nic view of the rx queue */
4616 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4617 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4618 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4619 base + NvRegRingSizes);
4621 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4623 /* restart rx engine */
4626 netif_start_queue(dev);
4627 netif_poll_enable(dev);
4628 nv_enable_hw_interrupts(dev, np->irqmask);
4633 static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
4635 switch (stringset) {
4637 memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str));
4640 memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str));
4645 static const struct ethtool_ops ops = {
4646 .get_drvinfo = nv_get_drvinfo,
4647 .get_link = ethtool_op_get_link,
4648 .get_wol = nv_get_wol,
4649 .set_wol = nv_set_wol,
4650 .get_settings = nv_get_settings,
4651 .set_settings = nv_set_settings,
4652 .get_regs_len = nv_get_regs_len,
4653 .get_regs = nv_get_regs,
4654 .nway_reset = nv_nway_reset,
4655 .get_perm_addr = ethtool_op_get_perm_addr,
4656 .get_tso = ethtool_op_get_tso,
4657 .set_tso = nv_set_tso,
4658 .get_ringparam = nv_get_ringparam,
4659 .set_ringparam = nv_set_ringparam,
4660 .get_pauseparam = nv_get_pauseparam,
4661 .set_pauseparam = nv_set_pauseparam,
4662 .get_rx_csum = nv_get_rx_csum,
4663 .set_rx_csum = nv_set_rx_csum,
4664 .get_tx_csum = ethtool_op_get_tx_csum,
4665 .set_tx_csum = nv_set_tx_csum,
4666 .get_sg = ethtool_op_get_sg,
4667 .set_sg = nv_set_sg,
4668 .get_strings = nv_get_strings,
4669 .get_stats_count = nv_get_stats_count,
4670 .get_ethtool_stats = nv_get_ethtool_stats,
4671 .self_test_count = nv_self_test_count,
4672 .self_test = nv_self_test,
4675 static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
4677 struct fe_priv *np = get_nvpriv(dev);
4679 spin_lock_irq(&np->lock);
4681 /* save vlan group */
4685 /* enable vlan on MAC */
4686 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
4688 /* disable vlan on MAC */
4689 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
4690 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
4693 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4695 spin_unlock_irq(&np->lock);
4698 /* The mgmt unit and driver use a semaphore to access the phy during init */
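/* The sema is polled for up to 10 iterations; once it reads as free, the
 * host sema bit is set and read back to confirm the management unit did
 * not grab it in the meantime.
 */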
4699 static int nv_mgmt_acquire_sema(struct net_device *dev)
4701 u8 __iomem *base = get_hwbase(dev);
4703 u32 tx_ctrl, mgmt_sema;
4705 for (i = 0; i < 10; i++) {
4706 mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
4707 if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
4712 if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
4715 for (i = 0; i < 2; i++) {
4716 tx_ctrl = readl(base + NvRegTransmitterControl);
4717 tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
4718 writel(tx_ctrl, base + NvRegTransmitterControl);
4720 /* verify that semaphore was acquired */
4721 tx_ctrl = readl(base + NvRegTransmitterControl);
4722 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
4723 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE))
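/*
 * nv_open: bring the nic up. The sequence is: clear stale configuration,
 * initialize the descriptor rings, program ring addresses/sizes, link
 * speed and deferral/polling registers, power up the phy, request the
 * irq, then unmask interrupts and kick off one manual link speed update.
 */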
4732 static int nv_open(struct net_device *dev)
4734 struct fe_priv *np = netdev_priv(dev);
4735 u8 __iomem *base = get_hwbase(dev);
4739 dprintk(KERN_DEBUG "nv_open: begin\n");
4741 /* erase previous misconfiguration */
4742 if (np->driver_data & DEV_HAS_POWER_CNTRL)
4744 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
4745 writel(0, base + NvRegMulticastAddrB);
4746 writel(0, base + NvRegMulticastMaskA);
4747 writel(0, base + NvRegMulticastMaskB);
4748 writel(0, base + NvRegPacketFilterFlags);
4750 writel(0, base + NvRegTransmitterControl);
4751 writel(0, base + NvRegReceiverControl);
4753 writel(0, base + NvRegAdapterControl);
4755 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
4756 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
4758 /* initialize descriptor rings */
4760 oom = nv_init_ring(dev);
4762 writel(0, base + NvRegLinkSpeed);
4763 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
4765 writel(0, base + NvRegUnknownSetupReg6);
4767 np->in_shutdown = 0;
4770 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4771 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4772 base + NvRegRingSizes);
4774 writel(np->linkspeed, base + NvRegLinkSpeed);
4775 if (np->desc_ver == DESC_VER_1)
4776 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
4778 writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
4779 writel(np->txrxctl_bits, base + NvRegTxRxControl);
4780 writel(np->vlanctl_bits, base + NvRegVlanControl);
4782 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
4783 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
4784 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
4785 KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
4787 writel(0, base + NvRegMIIMask);
4788 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4789 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
4791 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
4792 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
4793 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
4794 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4796 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
4797 get_random_bytes(&i, sizeof(i));
4798 writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
4799 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
4800 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
4801 if (poll_interval == -1) {
4802 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
4803 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
4805 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
4808 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
4809 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4810 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
4811 base + NvRegAdapterControl);
4812 writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
4813 writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
4815 writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
4817 i = readl(base + NvRegPowerState);
4818 if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
4819 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
4823 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
4825 nv_disable_hw_interrupts(dev, np->irqmask);
4827 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
4828 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4831 if (nv_request_irq(dev, 0)) {
4835 /* ask for interrupts */
4836 nv_enable_hw_interrupts(dev, np->irqmask);
4838 spin_lock_irq(&np->lock);
4839 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
4840 writel(0, base + NvRegMulticastAddrB);
4841 writel(0, base + NvRegMulticastMaskA);
4842 writel(0, base + NvRegMulticastMaskB);
4843 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
4844 /* One manual link speed update: Interrupts are enabled, future link
4845 * speed changes cause interrupts and are handled by nv_link_irq().
4849 miistat = readl(base + NvRegMIIStatus);
4850 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
4851 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
4853 /* set linkspeed to invalid value, thus force nv_update_linkspeed to init hw */
4856 ret = nv_update_linkspeed(dev);
4859 netif_start_queue(dev);
4860 netif_poll_enable(dev);
4863 netif_carrier_on(dev);
4865 printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
4866 netif_carrier_off(dev);
4869 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4871 /* start statistics timer */
4872 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2))
4873 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
4875 spin_unlock_irq(&np->lock);
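/*
 * nv_close: tear down in roughly the reverse order of nv_open: flag the
 * shutdown, quiesce timers and the irq, stop the queue and engines with
 * interrupts masked, and leave the packet filter armed only if WoL is
 * enabled.
 */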
4883 static int nv_close(struct net_device *dev)
4885 struct fe_priv *np = netdev_priv(dev);
4886 u8 __iomem *base;
4888 spin_lock_irq(&np->lock);
4889 np->in_shutdown = 1;
4890 spin_unlock_irq(&np->lock);
4891 netif_poll_disable(dev);
4892 synchronize_irq(dev->irq);
4894 del_timer_sync(&np->oom_kick);
4895 del_timer_sync(&np->nic_poll);
4896 del_timer_sync(&np->stats_poll);
4898 netif_stop_queue(dev);
4899 spin_lock_irq(&np->lock);
4904 /* disable interrupts on the nic or we will lock up */
4905 base = get_hwbase(dev);
4906 nv_disable_hw_interrupts(dev, np->irqmask);
4908 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
4910 spin_unlock_irq(&np->lock);
4916 if (np->wolenabled) {
4917 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
4921 /* FIXME: power down nic */
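/*
 * nv_probe: one-time device setup: map the register window, pick the
 * descriptor format and feature flags from the pci id table, read and
 * (if needed) byte-reverse the MAC address, take the phy out of low
 * power, locate a phy on the mii bus and register the netdev.
 */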
4926 static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
4928 struct net_device *dev;
4933 u32 powerstate, txreg;
4934 u32 phystate_orig = 0, phystate;
4935 int phyinitialized = 0;
4937 dev = alloc_etherdev(sizeof(struct fe_priv));
4942 np = netdev_priv(dev);
4943 np->pci_dev = pci_dev;
4944 spin_lock_init(&np->lock);
4945 SET_MODULE_OWNER(dev);
4946 SET_NETDEV_DEV(dev, &pci_dev->dev);
4948 init_timer(&np->oom_kick);
4949 np->oom_kick.data = (unsigned long) dev;
4950 np->oom_kick.function = &nv_do_rx_refill; /* timer handler */
4951 init_timer(&np->nic_poll);
4952 np->nic_poll.data = (unsigned long) dev;
4953 np->nic_poll.function = &nv_do_nic_poll; /* timer handler */
4954 init_timer(&np->stats_poll);
4955 np->stats_poll.data = (unsigned long) dev;
4956 np->stats_poll.function = &nv_do_stats_poll; /* timer handler */
4958 err = pci_enable_device(pci_dev);
4960 printk(KERN_INFO "forcedeth: pci_enable_device failed (%d) for device %s\n",
4961 err, pci_name(pci_dev));
4965 pci_set_master(pci_dev);
4967 err = pci_request_regions(pci_dev, DRV_NAME);
4971 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2))
4972 np->register_size = NV_PCI_REGSZ_VER3;
4973 else if (id->driver_data & DEV_HAS_STATISTICS_V1)
4974 np->register_size = NV_PCI_REGSZ_VER2;
4976 np->register_size = NV_PCI_REGSZ_VER1;
4980 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
4981 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
4982 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
4983 pci_resource_len(pci_dev, i),
4984 pci_resource_flags(pci_dev, i));
4985 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
4986 pci_resource_len(pci_dev, i) >= np->register_size) {
4987 addr = pci_resource_start(pci_dev, i);
4991 if (i == DEVICE_COUNT_RESOURCE) {
4992 printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n",
4997 /* copy of driver data */
4998 np->driver_data = id->driver_data;
5000 /* handle different descriptor versions */
5001 if (id->driver_data & DEV_HAS_HIGH_DMA) {
5002 /* packet format 3: supports 40-bit addressing */
5003 np->desc_ver = DESC_VER_3;
5004 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
5006 if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
5007 printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
5010 dev->features |= NETIF_F_HIGHDMA;
5011 printk(KERN_INFO "forcedeth: using HIGHDMA\n");
5013 if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
5014 printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed, using 32-bit ring buffers for device %s.\n",
5018 } else if (id->driver_data & DEV_HAS_LARGEDESC) {
5019 /* packet format 2: supports jumbo frames */
5020 np->desc_ver = DESC_VER_2;
5021 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
5023 /* original packet format */
5024 np->desc_ver = DESC_VER_1;
5025 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
5028 np->pkt_limit = NV_PKTLIMIT_1;
5029 if (id->driver_data & DEV_HAS_LARGEDESC)
5030 np->pkt_limit = NV_PKTLIMIT_2;
5032 if (id->driver_data & DEV_HAS_CHECKSUM) {
5034 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5035 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
5036 dev->features |= NETIF_F_TSO;
5039 np->vlanctl_bits = 0;
5040 if (id->driver_data & DEV_HAS_VLAN) {
5041 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
5042 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
5043 dev->vlan_rx_register = nv_vlan_rx_register;
5047 if ((id->driver_data & DEV_HAS_MSI) && msi) {
5048 np->msi_flags |= NV_MSI_CAPABLE;
5050 if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
5051 np->msi_flags |= NV_MSI_X_CAPABLE;
5054 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
5055 if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
5056 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
5061 np->base = ioremap(addr, np->register_size);
5064 dev->base_addr = (unsigned long)np->base;
5066 dev->irq = pci_dev->irq;
5068 np->rx_ring_size = RX_RING_DEFAULT;
5069 np->tx_ring_size = TX_RING_DEFAULT;
5071 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
5072 np->rx_ring.orig = pci_alloc_consistent(pci_dev,
5073 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
5075 if (!np->rx_ring.orig)
5077 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
5079 np->rx_ring.ex = pci_alloc_consistent(pci_dev,
5080 sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
5082 if (!np->rx_ring.ex)
5084 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
5086 np->rx_skb = kmalloc(sizeof(struct nv_skb_map) * np->rx_ring_size, GFP_KERNEL);
5087 np->tx_skb = kmalloc(sizeof(struct nv_skb_map) * np->tx_ring_size, GFP_KERNEL);
5088 if (!np->rx_skb || !np->tx_skb)
5090 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
5091 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
5093 dev->open = nv_open;
5094 dev->stop = nv_close;
5095 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
5096 dev->hard_start_xmit = nv_start_xmit;
5098 dev->hard_start_xmit = nv_start_xmit_optimized;
5099 dev->get_stats = nv_get_stats;
5100 dev->change_mtu = nv_change_mtu;
5101 dev->set_mac_address = nv_set_mac_address;
5102 dev->set_multicast_list = nv_set_multicast;
5103 #ifdef CONFIG_NET_POLL_CONTROLLER
5104 dev->poll_controller = nv_poll_controller;
5106 dev->weight = RX_WORK_PER_LOOP;
5107 #ifdef CONFIG_FORCEDETH_NAPI
5108 dev->poll = nv_napi_poll;
5110 SET_ETHTOOL_OPS(dev, &ops);
5111 dev->tx_timeout = nv_tx_timeout;
5112 dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
5114 pci_set_drvdata(pci_dev, dev);
5116 /* read the mac address */
5117 base = get_hwbase(dev);
5118 np->orig_mac[0] = readl(base + NvRegMacAddrA);
5119 np->orig_mac[1] = readl(base + NvRegMacAddrB);
5121 /* check the workaround bit for correct mac address order */
5122 txreg = readl(base + NvRegTransmitPoll);
5123 if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
5124 /* mac address is already in correct order */
5125 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
5126 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
5127 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5128 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5129 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
5130 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
5132 /* need to reverse mac address to correct order */
5133 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
5134 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
5135 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
5136 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
5137 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
5138 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
5139 /* set permanent address to be correct as well */
5140 np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
5141 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
5142 np->orig_mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
5143 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5145 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5147 if (!is_valid_ether_addr(dev->perm_addr)) {
5149 * Bad mac address. At least one bios sets the mac address
5150 * to 01:23:45:67:89:ab
5152 printk(KERN_ERR "%s: Invalid Mac address detected: %02x:%02x:%02x:%02x:%02x:%02x\n",
5154 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
5155 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
5156 printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n");
5157 dev->dev_addr[0] = 0x00;
5158 dev->dev_addr[1] = 0x00;
5159 dev->dev_addr[2] = 0x6c;
5160 get_random_bytes(&dev->dev_addr[3], 3);
5163 dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev),
5164 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
5165 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
5167 /* set mac address */
5168 nv_copy_mac_to_hw(dev);
5171 writel(0, base + NvRegWakeUpFlags);
5174 if (id->driver_data & DEV_HAS_POWER_CNTRL) {
5176 pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id);
5178 /* take phy and nic out of low power mode */
5179 powerstate = readl(base + NvRegPowerState2);
5180 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
5181 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
5182 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
5183 revision_id >= 0xA3)
5184 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
5185 writel(powerstate, base + NvRegPowerState2);
5188 if (np->desc_ver == DESC_VER_1) {
5189 np->tx_flags = NV_TX_VALID;
5191 np->tx_flags = NV_TX2_VALID;
5193 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
5194 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5195 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5196 np->msi_flags |= 0x0003;
5198 np->irqmask = NVREG_IRQMASK_CPU;
5199 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5200 np->msi_flags |= 0x0001;
5203 if (id->driver_data & DEV_NEED_TIMERIRQ)
5204 np->irqmask |= NVREG_IRQ_TIMER;
5205 if (id->driver_data & DEV_NEED_LINKTIMER) {
5206 dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
5207 np->need_linktimer = 1;
5208 np->link_timeout = jiffies + LINK_TIMEOUT;
5210 dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
5211 np->need_linktimer = 0;
5214 /* clear phy state and temporarily halt phy interrupts */
5215 writel(0, base + NvRegMIIMask);
5216 phystate = readl(base + NvRegAdapterControl);
5217 if (phystate & NVREG_ADAPTCTL_RUNNING) {
5219 phystate &= ~NVREG_ADAPTCTL_RUNNING;
5220 writel(phystate, base + NvRegAdapterControl);
5222 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
5224 if (id->driver_data & DEV_HAS_MGMT_UNIT) {
5225 /* management unit running on the mac? */
5226 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
5227 np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
5228 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
5229 for (i = 0; i < 5000; i++) {
5231 if (nv_mgmt_acquire_sema(dev)) {
5232 /* has the management unit set up the phy already? */
5233 if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
5234 NVREG_XMITCTL_SYNC_PHY_INIT) {
5235 /* phy was already initialized by the mgmt unit */
5237 dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
5239 /* we need to init the phy */
5247 /* find a suitable phy */
5248 for (i = 1; i <= 32; i++) {
5250 int phyaddr = i & 0x1F;
5252 spin_lock_irq(&np->lock);
5253 id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
5254 spin_unlock_irq(&np->lock);
5255 if (id1 < 0 || id1 == 0xffff)
5257 spin_lock_irq(&np->lock);
5258 id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
5259 spin_unlock_irq(&np->lock);
5260 if (id2 < 0 || id2 == 0xffff)
5263 np->phy_model = id2 & PHYID2_MODEL_MASK;
5264 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
5265 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
5266 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
5267 pci_name(pci_dev), id1, id2, phyaddr);
5268 np->phyaddr = phyaddr;
5269 np->phy_oui = id1 | id2;
5273 printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
5278 if (!phyinitialized) {
5282 /* see if it is a gigabit phy */
5283 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5284 if (mii_status & PHY_GIGABIT) {
5285 np->gigabit = PHY_GIGABIT;
5289 /* set default link speed settings */
5290 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
5294 err = register_netdev(dev);
5296 printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
5299 printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
5300 dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
5307 writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
5308 pci_set_drvdata(pci_dev, NULL);
5312 iounmap(get_hwbase(dev));
5314 pci_release_regions(pci_dev);
5316 pci_disable_device(pci_dev);
5323 static void __devexit nv_remove(struct pci_dev *pci_dev)
5325 struct net_device *dev = pci_get_drvdata(pci_dev);
5326 struct fe_priv *np = netdev_priv(dev);
5327 u8 __iomem *base = get_hwbase(dev);
5329 unregister_netdev(dev);
5331 /* special op: write back the misordered MAC address - otherwise
5332 * the next nv_probe would see a wrong address.
5334 writel(np->orig_mac[0], base + NvRegMacAddrA);
5335 writel(np->orig_mac[1], base + NvRegMacAddrB);
5337 /* free all structures */
5339 iounmap(get_hwbase(dev));
5340 pci_release_regions(pci_dev);
5341 pci_disable_device(pci_dev);
5343 pci_set_drvdata(pci_dev, NULL);
5347 static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
5349 struct net_device *dev = pci_get_drvdata(pdev);
5350 struct fe_priv *np = netdev_priv(dev);
5352 if (!netif_running(dev))
5355 netif_device_detach(dev);
5360 pci_save_state(pdev);
5361 pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
5362 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5367 static int nv_resume(struct pci_dev *pdev)
5369 struct net_device *dev = pci_get_drvdata(pdev);
5372 if (!netif_running(dev))
5375 netif_device_attach(dev);
5377 pci_set_power_state(pdev, PCI_D0);
5378 pci_restore_state(pdev);
5379 pci_enable_wake(pdev, PCI_D0, 0);
5386 #define nv_suspend NULL
5387 #define nv_resume NULL
5388 #endif /* CONFIG_PM */
5390 static struct pci_device_id pci_tbl[] = {
5391 { /* nForce Ethernet Controller */
5392 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
5393 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
5395 { /* nForce2 Ethernet Controller */
5396 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
5397 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
5399 { /* nForce3 Ethernet Controller */
5400 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
5401 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
5403 { /* nForce3 Ethernet Controller */
5404 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
5405 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
5407 { /* nForce3 Ethernet Controller */
5408 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
5409 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
5411 { /* nForce3 Ethernet Controller */
5412 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
5413 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
5415 { /* nForce3 Ethernet Controller */
5416 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
5417 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
5419 { /* CK804 Ethernet Controller */
5420 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
5421 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
5423 { /* CK804 Ethernet Controller */
5424 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
5425 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
5427 { /* MCP04 Ethernet Controller */
5428 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
5429 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
5431 { /* MCP04 Ethernet Controller */
5432 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
5433 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
5435 { /* MCP51 Ethernet Controller */
5436 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
5437 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
5439 { /* MCP51 Ethernet Controller */
5440 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
5441 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
5443 { /* MCP55 Ethernet Controller */
5444 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
5445 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5447 { /* MCP55 Ethernet Controller */
5448 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
5449 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5451 { /* MCP61 Ethernet Controller */
5452 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
5453 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5455 { /* MCP61 Ethernet Controller */
5456 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
5457 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5459 { /* MCP61 Ethernet Controller */
5460 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
5461 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5463 { /* MCP61 Ethernet Controller */
5464 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
5465 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5467 { /* MCP65 Ethernet Controller */
5468 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
5469 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5471 { /* MCP65 Ethernet Controller */
5472 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
5473 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5475 { /* MCP65 Ethernet Controller */
5476 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
5477 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5479 { /* MCP65 Ethernet Controller */
5480 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
5481 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5483 { /* MCP67 Ethernet Controller */
5484 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
5485 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5487 { /* MCP67 Ethernet Controller */
5488 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
5489 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5491 { /* MCP67 Ethernet Controller */
5492 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
5493 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5495 { /* MCP67 Ethernet Controller */
5496 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
5497 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5502 static struct pci_driver driver = {
5503 .name = "forcedeth",
5504 .id_table = pci_tbl,
5505 .probe = nv_probe,
5506 .remove = __devexit_p(nv_remove),
5507 .suspend = nv_suspend,
5508 .resume = nv_resume,
5511 static int __init init_nic(void)
5513 printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
5514 return pci_register_driver(&driver);
5517 static void __exit exit_nic(void)
5519 pci_unregister_driver(&driver);
5522 module_param(max_interrupt_work, int, 0);
5523 MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
5524 module_param(optimization_mode, int, 0);
5525 MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
5526 module_param(poll_interval, int, 0);
5527 MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, computed as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and max is 65535.");
5528 module_param(msi, int, 0);
5529 MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
5530 module_param(msix, int, 0);
5531 MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
5532 module_param(dma_64bit, int, 0);
5533 MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
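/*
 * Example module load (hypothetical values, shown only for illustration):
 *
 *   modprobe forcedeth optimization_mode=1 poll_interval=97
 *
 * Using the formula above, poll_interval = (1000 * 100) / 2^10 ~= 97
 * corresponds to a timer interrupt roughly every 1000 microseconds.
 */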
5535 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
5536 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
5537 MODULE_LICENSE("GPL");
5539 MODULE_DEVICE_TABLE(pci, pci_tbl);
5541 module_init(init_nic);
5542 module_exit(exit_nic);