/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey. It's neither supported nor endorsed
 * by NVIDIA Corp. Use at your own risk.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Changelog:
 * 0.01: 05 Oct 2003: First release that compiles without warnings.
 * 0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
 *			Check all PCI BARs for the register window.
 *			udelay added to mii_rw.
 * 0.03: 06 Oct 2003: Initialize dev->irq.
 * 0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
 * 0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
 * 0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
 * 0.07: 14 Oct 2003: Further irq mask updates.
 * 0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
 *			added into irq handler, NULL check for drain_ring.
 * 0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
 *			requested interrupt sources.
 * 0.10: 20 Oct 2003: First cleanup for release.
 * 0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
 *			MAC Address init fix, set_multicast cleanup.
 * 0.12: 23 Oct 2003: Cleanups for release.
 * 0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
 *			Set link speed correctly. start rx before starting
 *			tx (nv_start_rx sets the link speed).
 * 0.14: 25 Oct 2003: Nic dependent irq mask.
 * 0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
 * 0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
 *			increased to 1628 bytes.
 * 0.17: 16 Nov 2003: undo rx buffer size increase. Subtract 1 from
 * 0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
 * 0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
 *			addresses, really stop rx if already running
 *			in nv_start_rx, clean up a bit.
 * 0.20: 07 Dec 2003: alloc fixes
 * 0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
 * 0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
 * 0.23: 26 Jan 2004: various small cleanups
 * 0.24: 27 Feb 2004: make driver even less anonymous in backtraces
 * 0.25: 09 Mar 2004: wol support
 * 0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
 * 0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
 *			added CK804/MCP04 device IDs, code fixes
 *			for registers, link status and other minor fixes.
 * 0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
 * 0.29: 31 Aug 2004: Add backup timer for link change notification.
 * 0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
 *			into nv_close, otherwise reenabling for wol can
 *			cause DMA to kfree'd memory.
 * 0.31: 14 Nov 2004: ethtool support for getting/setting link
 * 0.32: 16 Apr 2005: RX_ERROR4 handling added.
 * 0.33: 16 May 2005: Support for MCP51 added.
 * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
 * 0.35: 26 Jun 2005: Support for MCP55 added.
 * 0.36: 28 Jun 2005: Add jumbo frame support.
 * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
 * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
 * 0.39: 18 Jul 2005: Add 64bit descriptor support.
 * 0.40: 19 Jul 2005: Add support for mac address change.
 * 0.41: 30 Jul 2005: Write back original MAC in nv_close instead
 * 0.42: 06 Aug 2005: Fix lack of link speed initialization
 *			in the second (and later) nv_open call
 * 0.43: 10 Aug 2005: Add support for tx checksum.
 * 0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
 * 0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
 * 0.46: 20 Oct 2005: Add irq optimization modes.
 * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
 * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
 * 0.49: 10 Dec 2005: Fix tso for large buffers.
 * 0.50: 20 Jan 2006: Add 8021pq tagging support.
 * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
 * 0.52: 20 Jan 2006: Add MSI/MSIX support.
 * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
 * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
 * 0.55: 22 Mar 2006: Add flow control (pause frame).
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define FORCEDETH_VERSION		"0.55"
#define DRV_NAME			"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#if 0
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif
#define DEV_NEED_TIMERIRQ	0x0001	/* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER	0x0002	/* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC	0x0004	/* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA	0x0008	/* device supports 64bit dma */
#define DEV_HAS_CHECKSUM	0x0010	/* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN		0x0020	/* device supports vlan tagging and stripping */
#define DEV_HAS_MSI		0x0040	/* device supports MSI */
#define DEV_HAS_MSI_X		0x0080	/* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL	0x0100	/* device supports power savings */
#define DEV_HAS_PAUSEFRAME_TX	0x0200	/* device supports tx pause frames */
enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK		0x1ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR		0x0001
#define NVREG_IRQ_RX			0x0002
#define NVREG_IRQ_RX_NOBUF		0x0004
#define NVREG_IRQ_TX_ERR		0x0008
#define NVREG_IRQ_TX_OK			0x0010
#define NVREG_IRQ_TIMER			0x0020
#define NVREG_IRQ_LINK			0x0040
#define NVREG_IRQ_RX_FORCED		0x0080
#define NVREG_IRQ_TX_FORCED		0x0100
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU		0x0040
#define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK)

#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
					NVREG_IRQ_TX_FORCED))

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL		3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	970
#define NVREG_POLL_DEFAULT_CPU	13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x3c,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegRandomSeed = 0x9c,
#define NVREG_RNDSEED_MASK	0x00ff
#define NVREG_RNDSEED_FORCE	0x7f00
#define NVREG_RNDSEED_FORCE2	0x2d00
#define NVREG_RNDSEED_FORCE3	0x7400

	NvRegUnknownSetupReg1 = 0xA0,
#define NVREG_UNKSETUP1_VAL	0x16070f
	NvRegUnknownSetupReg2 = 0xA4,
#define NVREG_UNKSETUP2_VAL	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
	NvRegMulticastMaskB = 0xBC,

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegUnknownTransmitterReg = 0x10c,
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegUnknownSetupReg3 = 0x13c,
#define NVREG_UNKSETUP3_VAL1	0x200010
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x02100
#define NVREG_TXRXCTL_DESC_3	0x02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x1ff0080
#define NVREG_TX_PAUSEFRAME_ENABLE	0x0c00030
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK		0x000f
#define NVREG_MIISTAT_MASK2		0x000f
	NvRegUnknownSetupReg4 = 0x184,
#define NVREG_UNKSETUP4_VAL	8

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegPatternCRC = 0x204,
	NvRegPatternMask = 0x208,
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F11
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
};
/* Big endian: should work, but is untested */
struct ring_desc {
	u32 PacketBuffer;
	u32 FlagLen;
};

struct ring_desc_ex {
	u32 PacketBufferHigh;
	u32 PacketBufferLow;
	u32 TxVlan;
	u32 FlagLen;
};

typedef union _ring_type {
	struct ring_desc* orig;
	struct ring_desc_ex* ex;
} ring_type;
#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
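
/*
 * Worked expansion of the XOR above (illustrative): inverting the flag
 * mask yields the length mask, so
 *	LEN_MASK_V1 = 0xffffffff ^ 0xffff0000 = 0x0000ffff  (16 length bits)
 *	LEN_MASK_V2 = 0xffffffff ^ 0xffffc000 = 0x00003fff  (14 length bits)
 * i.e. the v2/v3 descriptor format trades two length bits for two extra
 * flag bits.
 */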
#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUMOK1	(0x10000000)
#define NV_RX2_CHECKSUMOK2	(0x14000000)
#define NV_RX2_CHECKSUMOK3	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x604
/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY	10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4
/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		128
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/*
 * Difference between the get and put pointers for the tx ring.
 * This is used to throttle the amount of data outstanding in the
 * tx ring.
 */
#define TX_LIMIT_DIFFERENCE	1
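
/*
 * Sketch of the resulting throttle check (the same test appears in
 * nv_start_xmit below): next_tx is the put pointer, nic_tx the get
 * pointer, and tx_limit_stop is assumed to be derived from the ring
 * size and TX_LIMIT_DIFFERENCE during setup.
 *
 *	if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop) {
 *		netif_stop_queue(dev);
 *		return NETDEV_TX_BUSY;
 *	}
 */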
/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	0x0
#define DESC_VER_2	0x02100
#define DESC_VER_3	0x02200
/* PHY defines */
#define PHY_OUI_MARVELL	0x5043
#define PHY_OUI_CICADA	0x03f1
#define PHYID1_OUI_MASK	0x03ff
#define PHYID1_OUI_SHFT	6
#define PHYID2_OUI_MASK	0xfc00
#define PHYID2_OUI_SHFT	10
#define PHY_INIT1	0x0f000
#define PHY_INIT2	0x0e00
#define PHY_INIT3	0x01000
#define PHY_INIT4	0x0200
#define PHY_INIT5	0x0004
#define PHY_INIT6	0x02000
#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_100	0x1
#define PHY_1000	0x2
#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE  0x0004
#define NV_PAUSEFRAME_TX_ENABLE  0x0008
/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS	8
#define NV_MSI_X_VECTORS_MASK	0x000f
#define NV_MSI_CAPABLE		0x0010
#define NV_MSI_X_CAPABLE	0x0020
#define NV_MSI_ENABLED		0x0040
#define NV_MSI_X_ENABLED	0x0080

#define NV_MSI_X_VECTOR_ALL	0x0
#define NV_MSI_X_VECTOR_RX	0x0
#define NV_MSI_X_VECTOR_TX	0x1
#define NV_MSI_X_VECTOR_OTHER	0x2
/*
 * SMP locking:
 * All hardware access under dev->priv->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
 *	needs dev->priv->lock :-(
 * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
 */
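
/*
 * Illustrative sketch of the lock order these rules imply for the tx
 * path (drawn from the rules above; the same order is used in
 * nv_change_mtu and nv_set_mac_address below):
 *
 *	spin_lock_bh(&dev->xmit_lock);	- serializes tx setup
 *	spin_lock(&np->lock);		- needed for actual submission
 *	...
 *	spin_unlock(&np->lock);
 *	spin_unlock_bh(&dev->xmit_lock);
 */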
/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct net_device_stats stats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	unsigned int phy_oui;
	u16 gigabit;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 register_size;

	u8 __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	ring_type rx_ring;
	unsigned int cur_rx, refill_rx;
	struct sk_buff **rx_skbuff;
	dma_addr_t *rx_dma;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	int rx_ring_size;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	ring_type tx_ring;
	unsigned int next_tx, nic_tx;
	struct sk_buff **tx_skbuff;
	dma_addr_t *tx_dma;
	unsigned int *tx_dma_len;
	u32 tx_flags;
	int tx_ring_size;
	int tx_limit_start;
	int tx_limit_stop;

	/* vlan fields */
	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;
};
/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 5;
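
/*
 * Sketch of how max_interrupt_work bounds an interrupt handler loop
 * (illustrative only; the actual handlers live further down in the
 * full driver):
 *
 *	int i = 0;
 *	for (;;) {
 *		... read irq status, ack and handle events ...
 *		if (i++ > max_interrupt_work) {
 *			... mask the stuck irq source and bail out ...
 *			break;
 *		}
 *	}
 */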
/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
#define NV_OPTIMIZATION_MODE_THROUGHPUT	0
#define NV_OPTIMIZATION_MODE_CPU	1
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
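
/*
 * Illustrative sketch (an assumption about how the mode is applied
 * during device setup, not code from this excerpt): the mode selects
 * both the irq mask and the timer interval defined earlier.
 *
 *	if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
 *		mask = NVREG_IRQMASK_THROUGHPUT;
 *		writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
 *	} else {
 *		mask = NVREG_IRQMASK_CPU;
 *		writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
 *	}
 */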
/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
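
/*
 * Illustrative helper (not part of the original driver): convert a
 * desired timer interval in microseconds into a register value using
 * the [(time_in_micro_secs * 100) / (2^10)] formula above.
 */
static inline u32 nv_usec_to_pollval(u32 usec)
{
	return (usec * 100) >> 10;	/* e.g. 1000 us -> 97, i.e. ~1 ms */
}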
/*
 * Disable MSI interrupts
 */
static int disable_msi = 0;

/*
 * Disable MSIX interrupts
 */
static int disable_msix = 0;
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->FlagLen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
}
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
				int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}
#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
		}
	}
}
static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skbuff)
		kfree(np->rx_skbuff);
	if (np->rx_dma)
		kfree(np->rx_dma);
	if (np->tx_skbuff)
		kfree(np->tx_skbuff);
	if (np->tx_dma)
		kfree(np->tx_dma);
	if (np->tx_dma_len)
		kfree(np->tx_dma_len);
}
static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}
static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}
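
/*
 * Usage note (sketch): because of the XOR behaviour in MSI-X mode,
 * "disable" is simply a second write of the very mask that enabled the
 * sources, e.g. (assuming the enabled mask was saved by the caller):
 *
 *	nv_enable_hw_interrupts(dev, mask);	- bits turned on
 *	nv_disable_hw_interrupts(dev, mask);	- same write, bits off
 */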
#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}

	return retval;
}
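
/*
 * Example usage (sketch): read the latched link status from the PHY's
 * basic status register, then write the advertisement register.
 * mii_rw() returns -1 on failure, 0 on a successful write, and the
 * register contents on a successful read.
 *
 *	int bmsr = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
 *	mii_rw(dev, np->phyaddr, MII_ADVERTISE, ADVERTISE_100FULL);
 */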
static int phy_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	miicontrol |= BMCR_RESET;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}
static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else
		np->gigabit = 0;

	/* reset the phy */
	if (phy_reset(dev)) {
		printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
		phy_reserved |= (PHY_INIT3 | PHY_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}
static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
		writel(0, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
				dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}
static void nv_stop_rx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	writel(0, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
			NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	writel(0, base + NvRegLinkSpeed);
}
static void nv_start_tx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
	pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	writel(0, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
			NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	writel(0, base + NvRegUnknownTransmitterReg);
}
static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* It seems that the nic always generates interrupts and doesn't
	 * accumulate errors internally. Thus the current values in np->stats
	 * are already up to date.
	 */
	return &np->stats;
}
/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without Available descriptors
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int refill_rx = np->refill_rx;
	int nr;

	while (np->cur_rx != refill_rx) {
		struct sk_buff *skb;

		nr = refill_rx % np->rx_ring_size;
		if (np->rx_skbuff[nr] == NULL) {

			skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
			if (!skb)
				break;

			skb->dev = dev;
			np->rx_skbuff[nr] = skb;
		} else {
			skb = np->rx_skbuff[nr];
		}
		np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
					skb->end-skb->data, PCI_DMA_FROMDEVICE);
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
			wmb();
			np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
		} else {
			np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
			np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
			wmb();
			np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
		}
		dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
					dev->name, refill_rx);
		refill_rx++;
	}
	np->refill_rx = refill_rx;
	if (np->cur_rx - refill_rx == np->rx_ring_size)
		return 1;
	return 0;
}
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
	if (nv_alloc_rx(dev)) {
		spin_lock_irq(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irq(&np->lock);
	}
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
}
static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->rx_ring_size;
	np->refill_rx = 0;
	for (i = 0; i < np->rx_ring_size; i++)
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->rx_ring.orig[i].FlagLen = 0;
		else
			np->rx_ring.ex[i].FlagLen = 0;
}

static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->next_tx = np->nic_tx = 0;
	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->tx_ring.orig[i].FlagLen = 0;
		else
			np->tx_ring.ex[i].FlagLen = 0;
		np->tx_skbuff[i] = NULL;
		np->tx_dma[i] = 0;
	}
}

static int nv_init_ring(struct net_device *dev)
{
	nv_init_tx(dev);
	nv_init_rx(dev);
	return nv_alloc_rx(dev);
}
static int nv_release_txskb(struct net_device *dev, unsigned int skbnr)
{
	struct fe_priv *np = netdev_priv(dev);

	dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n",
		dev->name, skbnr);

	if (np->tx_dma[skbnr]) {
		pci_unmap_page(np->pci_dev, np->tx_dma[skbnr],
			       np->tx_dma_len[skbnr],
			       PCI_DMA_TODEVICE);
		np->tx_dma[skbnr] = 0;
	}

	if (np->tx_skbuff[skbnr]) {
		dev_kfree_skb_any(np->tx_skbuff[skbnr]);
		np->tx_skbuff[skbnr] = NULL;
		return 1;
	} else {
		return 0;
	}
}
static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->tx_ring.orig[i].FlagLen = 0;
		else
			np->tx_ring.ex[i].FlagLen = 0;
		if (nv_release_txskb(dev, i))
			np->stats.tx_dropped++;
	}
}

static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	for (i = 0; i < np->rx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->rx_ring.orig[i].FlagLen = 0;
		else
			np->rx_ring.ex[i].FlagLen = 0;
		wmb();
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev, np->rx_dma[i],
						np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
						PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
			np->rx_skbuff[i] = NULL;
		}
	}
}

static void drain_ring(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}
/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with dev->xmit_lock held.
 */
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int nr = (np->next_tx - 1) % np->tx_ring_size;
	unsigned int start_nr = np->next_tx % np->tx_ring_size;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 tx_flags_vlan = 0;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	spin_lock_irq(&np->lock);

	if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop) {
		spin_unlock_irq(&np->lock);
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	/* setup the header buffer */
	do {
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		nr = (nr + 1) % np->tx_ring_size;

		np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						PCI_DMA_TODEVICE);
		np->tx_dma_len[nr] = bcnt;

		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
			np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
		} else {
			np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
			np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
			np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
		}
		tx_flags = np->tx_flags;
		offset += bcnt;
		size -= bcnt;
	} while(size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			nr = (nr + 1) % np->tx_ring_size;

			np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
						      PCI_DMA_TODEVICE);
			np->tx_dma_len[nr] = bcnt;

			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
				np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
				np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
			} else {
				np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
				np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
				np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
			}
			offset += bcnt;
			size -= bcnt;
		} while (size);
	}

	/* set last fragment flag */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
	} else {
		np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
	}

	np->tx_skbuff[nr] = skb;

#ifdef NETIF_F_TSO
	if (skb_shinfo(skb)->tso_size)
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
	else
#endif
	tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);

	/* vlan tag */
	if (np->vlangrp && vlan_tx_tag_present(skb)) {
		tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb);
	}

	/* set tx flags */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
	} else {
		np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
		np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
	}

	dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
		dev->name, np->next_tx, entries, tx_flags_extra);
	{
		int j;
		for (j=0; j<64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	np->next_tx += entries;

	dev->trans_start = jiffies;
	spin_unlock_irq(&np->lock);
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));
	return NETDEV_TX_OK;
}
/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static void nv_tx_done(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 Flags;
	unsigned int i;
	struct sk_buff *skb;

	while (np->nic_tx != np->next_tx) {
		i = np->nic_tx % np->tx_ring_size;

		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
		else
			Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);

		dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
					dev->name, np->nic_tx, Flags);
		if (Flags & NV_TX_VALID)
			break;
		if (np->desc_ver == DESC_VER_1) {
			if (Flags & NV_TX_LASTPACKET) {
				skb = np->tx_skbuff[i];
				if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
					     NV_TX_UNDERFLOW|NV_TX_ERROR)) {
					if (Flags & NV_TX_UNDERFLOW)
						np->stats.tx_fifo_errors++;
					if (Flags & NV_TX_CARRIERLOST)
						np->stats.tx_carrier_errors++;
					np->stats.tx_errors++;
				} else {
					np->stats.tx_packets++;
					np->stats.tx_bytes += skb->len;
				}
			}
		} else {
			if (Flags & NV_TX2_LASTPACKET) {
				skb = np->tx_skbuff[i];
				if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
					     NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
					if (Flags & NV_TX2_UNDERFLOW)
						np->stats.tx_fifo_errors++;
					if (Flags & NV_TX2_CARRIERLOST)
						np->stats.tx_carrier_errors++;
					np->stats.tx_errors++;
				} else {
					np->stats.tx_packets++;
					np->stats.tx_bytes += skb->len;
				}
			}
		}
		nv_release_txskb(dev, i);
		np->nic_tx++;
	}
	if (np->next_tx - np->nic_tx < np->tx_limit_start)
		netif_wake_queue(dev);
}
/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with dev->xmit_lock held.
 */
static void nv_tx_timeout(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 status;

	if (np->msi_flags & NV_MSI_X_ENABLED)
		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
	else
		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;

	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);

	{
		int i;

		printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n",
				dev->name, (unsigned long)np->ring_addr,
				np->next_tx, np->nic_tx);
		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
		for (i=0;i<=np->register_size;i+= 32) {
			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
					i,
					readl(base + i + 0), readl(base + i + 4),
					readl(base + i + 8), readl(base + i + 12),
					readl(base + i + 16), readl(base + i + 20),
					readl(base + i + 24), readl(base + i + 28));
		}
		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
		for (i=0;i<np->tx_ring_size;i+= 4) {
			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
				       le32_to_cpu(np->tx_ring.orig[i].FlagLen),
				       le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
				       le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
				       le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
				       le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
				       le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
				       le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
			} else {
				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
				       le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
				       le32_to_cpu(np->tx_ring.ex[i].FlagLen),
				       le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
				       le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
				       le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
				       le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
				       le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
				       le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
				       le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
				       le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
				       le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
			}
		}
	}

	spin_lock_irq(&np->lock);

	/* 1) stop tx engine */
	nv_stop_tx(dev);

	/* 2) check that the packets were not sent already: */
	nv_tx_done(dev);

	/* 3) if there are dead entries: clear everything */
	if (np->next_tx != np->nic_tx) {
		printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
		nv_drain_tx(dev);
		np->next_tx = np->nic_tx = 0;
		setup_hw_rings(dev, NV_SETUP_TX_RING);
		netif_wake_queue(dev);
	}

	/* 4) restart tx engine */
	nv_start_tx(dev);
	spin_unlock_irq(&np->lock);
}
/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
	int hdrlen;	/* length of the 802 header */
	int protolen;	/* length as stored in the proto field */

	/* 1) calculate len according to header */
	if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
		protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
		hdrlen = VLAN_HLEN;
	} else {
		protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
		hdrlen = ETH_HLEN;
	}
	dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
				dev->name, datalen, protolen, hdrlen);
	if (protolen > ETH_DATA_LEN)
		return datalen; /* Value in proto field not a len, no checks possible */

	protolen += hdrlen;
	/* consistency checks: */
	if (datalen > ETH_ZLEN) {
		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim off
			 * the excess.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
					dev->name, protolen);
			return protolen;
		} else {
			/* less data on wire than mentioned in header.
			 * Discard the packet.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
					dev->name);
			return -1;
		}
	} else {
		/* short packet. Accept only if 802 values are also short */
		if (protolen > ETH_ZLEN) {
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
					dev->name);
			return -1;
		}
		dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
				dev->name, datalen);
		return datalen;
	}
}
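
/*
 * Worked example for the checks above (illustrative): a 100 byte frame
 * whose 802.3 length field says 82 gives protolen = 82 + ETH_HLEN = 96.
 * Since datalen (100) is above ETH_ZLEN and datalen >= protolen, the
 * packet is accepted and trimmed to 96 bytes - the 4 trailing bytes are
 * treated as padding. Had the length field claimed more than 86, the
 * packet would have been discarded as truncated.
 */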
static void nv_rx_process(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 Flags;
	u32 vlanflags = 0;

	for (;;) {
		struct sk_buff *skb;
		int len;
		int i;
		if (np->cur_rx - np->refill_rx >= np->rx_ring_size)
			break;	/* we scanned the whole ring - do not continue */

		i = np->cur_rx % np->rx_ring_size;
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
			len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
		} else {
			Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
			len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
			vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow);
		}

		dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
					dev->name, np->cur_rx, Flags);

		if (Flags & NV_RX_AVAIL)
			break;	/* still owned by hardware, */

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->rx_dma[i],
				np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
				PCI_DMA_FROMDEVICE);

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags);
			for (j=0; j<64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (np->desc_ver == DESC_VER_1) {
			if (!(Flags & NV_RX_DESCRIPTORVALID))
				goto next_pkt;

			if (Flags & NV_RX_ERROR) {
				if (Flags & NV_RX_MISSEDFRAME) {
					np->stats.rx_missed_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX_CRCERR) {
					np->stats.rx_crc_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX_OVERFLOW) {
					np->stats.rx_over_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX_ERROR4) {
					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
					if (len < 0) {
						np->stats.rx_errors++;
						goto next_pkt;
					}
				}
				/* framing errors are soft errors. */
				if (Flags & NV_RX_FRAMINGERR) {
					if (Flags & NV_RX_SUBSTRACT1) {
						len--;
					}
				}
			}
		} else {
			if (!(Flags & NV_RX2_DESCRIPTORVALID))
				goto next_pkt;

			if (Flags & NV_RX2_ERROR) {
				if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX2_CRCERR) {
					np->stats.rx_crc_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX2_OVERFLOW) {
					np->stats.rx_over_errors++;
					np->stats.rx_errors++;
					goto next_pkt;
				}
				if (Flags & NV_RX2_ERROR4) {
					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
					if (len < 0) {
						np->stats.rx_errors++;
						goto next_pkt;
					}
				}
				/* framing errors are soft errors */
				if (Flags & NV_RX2_FRAMINGERR) {
					if (Flags & NV_RX2_SUBSTRACT1) {
						len--;
					}
				}
			}
			Flags &= NV_RX2_CHECKSUMMASK;
			if (Flags == NV_RX2_CHECKSUMOK1 ||
			    Flags == NV_RX2_CHECKSUMOK2 ||
			    Flags == NV_RX2_CHECKSUMOK3) {
				dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
				np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
			}
		}
		/* got a valid packet - forward it to the network core */
		skb = np->rx_skbuff[i];
		np->rx_skbuff[i] = NULL;

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
					dev->name, np->cur_rx, len, skb->protocol);
		if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) {
			vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK);
		} else {
			netif_rx(skb);
		}
		dev->last_rx = jiffies;
		np->stats.rx_packets++;
		np->stats.rx_bytes += len;
next_pkt:
		np->cur_rx++;
	}
}
static void set_bufsize(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
}
/*
 * nv_change_mtu: dev->change_mtu function
 * Called with dev_base_lock held for read.
 */
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
	struct fe_priv *np = netdev_priv(dev);
	int old_mtu;

	if (new_mtu < 64 || new_mtu > np->pkt_limit)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu == new_mtu)
		return 0;

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		u8 __iomem *base = get_hwbase(dev);
		/*
		 * It seems that the nic preloads valid ring entries into an
		 * internal buffer. The procedure for flushing everything is
		 * guessed, there is probably a simpler approach.
		 * Changing the MTU is a rare event, it shouldn't matter.
		 */
		nv_disable_irq(dev);
		spin_lock_bh(&dev->xmit_lock);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		nv_txrx_reset(dev);
		/* drain rx queue */
		nv_drain_rx(dev);
		nv_drain_tx(dev);
		/* reinit driver view of the rx queue */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}
		/* reinit nic view of the rx queue */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart rx engine */
		nv_start_rx(dev);
		nv_start_tx(dev);
		spin_unlock(&np->lock);
		spin_unlock_bh(&dev->xmit_lock);
		nv_enable_irq(dev);
	}
	return 0;
}
static void nv_copy_mac_to_hw(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 mac[2];

	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);

	writel(mac[0], base + NvRegMacAddrA);
	writel(mac[1], base + NvRegMacAddrB);
}
/*
 * nv_set_mac_address: dev->set_mac_address function
 * Called with rtnl_lock() held.
 */
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
	struct fe_priv *np = netdev_priv(dev);
	struct sockaddr *macaddr = (struct sockaddr*)addr;

	if(!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	/* synchronized against open : rtnl_lock() held by caller */
	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);

	if (netif_running(dev)) {
		spin_lock_bh(&dev->xmit_lock);
		spin_lock_irq(&np->lock);

		/* stop rx engine */
		nv_stop_rx(dev);

		/* set mac address */
		nv_copy_mac_to_hw(dev);

		/* restart rx engine */
		nv_start_rx(dev);
		spin_unlock_irq(&np->lock);
		spin_unlock_bh(&dev->xmit_lock);
	} else {
		nv_copy_mac_to_hw(dev);
	}
	return 0;
}
/*
 * nv_set_multicast: dev->set_multicast function
 * Called with dev->xmit_lock held.
 */
static void nv_set_multicast(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 addr[2];
	u32 mask[2];
	u32 pff;

	memset(addr, 0, sizeof(addr));
	memset(mask, 0, sizeof(mask));

	if (dev->flags & IFF_PROMISC) {
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
		pff = NVREG_PFF_PROMISC;
	} else {
		pff = NVREG_PFF_MYADDR;

		if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
			u32 alwaysOff[2];
			u32 alwaysOn[2];

			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
			if (dev->flags & IFF_ALLMULTI) {
				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
			} else {
				struct dev_mc_list *walk;

				walk = dev->mc_list;
				while (walk != NULL) {
					u32 a, b;
					a = le32_to_cpu(*(u32 *) walk->dmi_addr);
					b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4]));
					alwaysOn[0] &= a;
					alwaysOff[0] &= ~a;
					alwaysOn[1] &= b;
					alwaysOff[1] &= ~b;
					walk = walk->next;
				}
			}
			addr[0] = alwaysOn[0];
			addr[1] = alwaysOn[1];
			mask[0] = alwaysOn[0] | alwaysOff[0];
			mask[1] = alwaysOn[1] | alwaysOff[1];
		}
	}
	addr[0] |= NVREG_MCASTADDRA_FORCE;
	pff |= NVREG_PFF_ALWAYS;
	spin_lock_irq(&np->lock);
	nv_stop_rx(dev);
	writel(addr[0], base + NvRegMulticastAddrA);
	writel(addr[1], base + NvRegMulticastAddrB);
	writel(mask[0], base + NvRegMulticastMaskA);
	writel(mask[1], base + NvRegMulticastMaskB);
	writel(pff, base + NvRegPacketFilterFlags);
	dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
		dev->name);
	nv_start_rx(dev);
	spin_unlock_irq(&np->lock);
}
1926 * nv_update_linkspeed: Setup the MAC according to the link partner
1927 * @dev: Network device to be configured
1929 * The function queries the PHY and checks if there is a link partner.
1930 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
1931 * set to 10 MBit HD.
1933 * The function returns 0 if there is no link partner and 1 if there is
1934 * a good link partner.
1936 static int nv_update_linkspeed(struct net_device *dev)
1938 struct fe_priv *np = netdev_priv(dev);
1939 u8 __iomem *base = get_hwbase(dev);
1942 int adv_lpa, adv_pause, lpa_pause;
1943 int newls = np->linkspeed;
1944 int newdup = np->duplex;
1947 u32 control_1000, status_1000, phyreg;
1949 /* BMSR_LSTATUS is latched, read it twice:
1950 * we want the current value.
1952 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1953 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1955 if (!(mii_status & BMSR_LSTATUS)) {
1956 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
1958 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1964 if (np->autoneg == 0) {
1965 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
1966 dev->name, np->fixed_mode);
1967 if (np->fixed_mode & LPA_100FULL) {
1968 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
1970 } else if (np->fixed_mode & LPA_100HALF) {
1971 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
1973 } else if (np->fixed_mode & LPA_10FULL) {
1974 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1977 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1983 /* check auto negotiation is complete */
1984 if (!(mii_status & BMSR_ANEGCOMPLETE)) {
1985 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */
1986 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
1989 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
	retval = 1;
	if (np->gigabit == PHY_GIGABIT) {
		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
		if ((control_1000 & ADVERTISE_1000FULL) &&
			(status_1000 & LPA_1000FULL)) {
			dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
				dev->name);
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
			newdup = 1;
			goto set_speed;
		}
	}
2008 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
2009 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
2010 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
2011 dev->name, adv, lpa);
	/* FIXME: handle parallel detection properly */
	adv_lpa = lpa & adv;
	if (adv_lpa & LPA_100FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 1;
	} else if (adv_lpa & LPA_100HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 0;
	} else if (adv_lpa & LPA_10FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 1;
	} else if (adv_lpa & LPA_10HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	} else {
		dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	}
set_speed:
	if (np->duplex == newdup && np->linkspeed == newls)
		return retval;

	dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
		dev->name, np->linkspeed, np->duplex, newls, newdup);

	np->duplex = newdup;
	np->linkspeed = newls;
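	/*
	 * The RNDSEED_FORCE values below come from the reverse-engineered
	 * register documentation: NvRegRandomSeed apparently needs a
	 * speed-dependent value re-forced on every link change when a
	 * gigabit phy is in use.
	 */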
	if (np->gigabit == PHY_GIGABIT) {
		phyreg = readl(base + NvRegRandomSeed);
		phyreg &= ~(0x3FF00);
		if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
			phyreg |= NVREG_RNDSEED_FORCE3;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
			phyreg |= NVREG_RNDSEED_FORCE2;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
			phyreg |= NVREG_RNDSEED_FORCE;
		writel(phyreg, base + NvRegRandomSeed);
	}
2055 phyreg = readl(base + NvRegPhyInterface);
2056 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
	if (np->duplex == 0)
		phyreg |= PHY_HALF;
	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
		phyreg |= PHY_100;
	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
		phyreg |= PHY_1000;
	writel(phyreg, base + NvRegPhyInterface);

	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
		base + NvRegMisc1);
	pci_push(base);
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
2071 /* setup pause frame based on advertisement and link partner */
2072 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
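	/*
	 * Resolve flow control as in IEEE 802.3 Annex 28B: symmetric pause
	 * when both partners advertise PAUSE_CAP, rx-only or tx-only pause
	 * when the CAP/ASYM combinations permit it. Pause frames are only
	 * meaningful on full duplex links, hence the duplex check below.
	 */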
	if (np->duplex != 0) {
		adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
		lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);

		switch (adv_pause) {
		case (ADVERTISE_PAUSE_CAP):
			if (lpa_pause & LPA_PAUSE_CAP) {
				np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE;
			}
			break;
		case (ADVERTISE_PAUSE_ASYM):
			if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM)) {
				np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
			}
			break;
		case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM):
			if (lpa_pause & LPA_PAUSE_CAP) {
				np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE;
			}
			if (lpa_pause == LPA_PAUSE_ASYM) {
				np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
			}
			break;
		}
	}
	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
		if (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE)
			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
		else
			writel(pff, base + NvRegPacketFilterFlags);
	}
	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
		if (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
			writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame);
			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
		} else {
			writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
			writel(regmisc, base + NvRegMisc1);
		}
	}

	return retval;
}
static void nv_linkchange(struct net_device *dev)
{
	if (nv_update_linkspeed(dev)) {
		if (!netif_carrier_ok(dev)) {
			netif_carrier_on(dev);
			printk(KERN_INFO "%s: link up.\n", dev->name);
			nv_start_rx(dev);
		}
	} else {
		if (netif_carrier_ok(dev)) {
			netif_carrier_off(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
			nv_stop_rx(dev);
		}
	}
}
static void nv_link_irq(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miistat;

	miistat = readl(base + NvRegMIIStatus);
	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
	dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);

	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
		nv_linkchange(dev);
	dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
}
static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
	for (i = 0; ; i++) {
		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
			events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
		} else {
			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
		}
		pci_push(base);
2174 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock(&np->lock);
		nv_tx_done(dev);
		spin_unlock(&np->lock);

		nv_rx_process(dev);
		if (nv_alloc_rx(dev)) {
			spin_lock(&np->lock);
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			spin_unlock(&np->lock);
		}
		if (events & NVREG_IRQ_LINK) {
			spin_lock(&np->lock);
			nv_link_irq(dev);
			spin_unlock(&np->lock);
		}
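		/* Boards flagged DEV_NEED_LINKTIMER do not deliver reliable
		 * link change interrupts, so the link state is re-checked
		 * whenever link_timeout expires. */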
		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
			spin_lock(&np->lock);
			nv_linkchange(dev);
			spin_unlock(&np->lock);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (events & (NVREG_IRQ_TX_ERR)) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
						dev->name, events);
		}
		if (events & (NVREG_IRQ_UNKNOWN)) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
						dev->name, events);
		}
		if (i > max_interrupt_work) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
			spin_unlock(&np->lock);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);

	return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
	for (i = 0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
		pci_push(base);
		dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock_irq(&np->lock);
		nv_tx_done(dev);
		spin_unlock_irq(&np->lock);

		if (events & (NVREG_IRQ_TX_ERR)) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
						dev->name, events);
		}
		if (i > max_interrupt_work) {
			spin_lock_irq(&np->lock);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
			spin_unlock_irq(&np->lock);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);

	return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
	for (i = 0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
		pci_push(base);
		dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		nv_rx_process(dev);
		if (nv_alloc_rx(dev)) {
			spin_lock_irq(&np->lock);
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			spin_unlock_irq(&np->lock);
		}
		if (i > max_interrupt_work) {
			spin_lock_irq(&np->lock);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
			spin_unlock_irq(&np->lock);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);

	return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
	for (i = 0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
		pci_push(base);
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		if (events & NVREG_IRQ_LINK) {
			spin_lock_irq(&np->lock);
			nv_link_irq(dev);
			spin_unlock_irq(&np->lock);
		}
		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
			spin_lock_irq(&np->lock);
			nv_linkchange(dev);
			spin_unlock_irq(&np->lock);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (events & (NVREG_IRQ_UNKNOWN)) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
						dev->name, events);
		}
		if (i > max_interrupt_work) {
			spin_lock_irq(&np->lock);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
			spin_unlock_irq(&np->lock);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);

	return IRQ_RETVAL(i);
}
static void nv_do_nic_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 mask = 0;

	/*
	 * First disable the irq(s), then reenable interrupts on the nic;
	 * this has to happen before calling nv_nic_irq, because that may
	 * decide to do otherwise.
	 */
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(dev->irq);
		mask = np->irqmask;
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
			mask |= NVREG_IRQ_RX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
			mask |= NVREG_IRQ_TX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
			mask |= NVREG_IRQ_OTHER;
		}
	}
	np->nic_poll_irq = 0;

	/* FIXME: Do we need synchronize_irq(dev->irq) here? */

	writel(mask, base + NvRegIrqMask);
	pci_push(base);
	if (!using_multi_irqs(dev)) {
		nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(dev->irq);
	} else {
		/* np->nic_poll_irq was cleared above, test the saved mask */
		if (mask & NVREG_IRQ_RX_ALL) {
			nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		}
		if (mask & NVREG_IRQ_TX_ALL) {
			nv_nic_irq_tx((int) 0, (void *) data, (struct pt_regs *) NULL);
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		}
		if (mask & NVREG_IRQ_OTHER) {
			nv_nic_irq_other((int) 0, (void *) data, (struct pt_regs *) NULL);
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
		}
	}
}
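/*
 * nv_poll_controller below is the netpoll/netconsole hook: it simply
 * drives the interrupt handler by hand through the nic_poll path above.
 */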
#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{
	nv_do_nic_poll((unsigned long) dev);
}
#endif
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct fe_priv *np = netdev_priv(dev);
	strcpy(info->driver, "forcedeth");
	strcpy(info->version, FORCEDETH_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}
static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	wolinfo->supported = WAKE_MAGIC;

	spin_lock_irq(&np->lock);
	if (np->wolenabled)
		wolinfo->wolopts = WAKE_MAGIC;
	spin_unlock_irq(&np->lock);
}
static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	spin_lock_irq(&np->lock);
	if (wolinfo->wolopts == 0) {
		writel(0, base + NvRegWakeUpFlags);
		np->wolenabled = 0;
	}
	if (wolinfo->wolopts & WAKE_MAGIC) {
		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
		np->wolenabled = 1;
	}
	spin_unlock_irq(&np->lock);
	return 0;
}
static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv;

	spin_lock_irq(&np->lock);
2493 ecmd->port = PORT_MII;
	if (!netif_running(dev)) {
		/* We do not track link speed / duplex setting if the
		 * interface is disabled. Force a link check */
		nv_update_linkspeed(dev);
	}
	switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
	case NVREG_LINKSPEED_10:
		ecmd->speed = SPEED_10;
		break;
	case NVREG_LINKSPEED_100:
		ecmd->speed = SPEED_100;
		break;
	case NVREG_LINKSPEED_1000:
		ecmd->speed = SPEED_1000;
		break;
	}
	ecmd->duplex = DUPLEX_HALF;
	if (np->duplex)
		ecmd->duplex = DUPLEX_FULL;
2514 ecmd->autoneg = np->autoneg;
	ecmd->advertising = ADVERTISED_MII;
	if (np->autoneg) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	} else {
		adv = np->fixed_mode;
	}
2523 if (adv & ADVERTISE_10HALF)
2524 ecmd->advertising |= ADVERTISED_10baseT_Half;
2525 if (adv & ADVERTISE_10FULL)
2526 ecmd->advertising |= ADVERTISED_10baseT_Full;
2527 if (adv & ADVERTISE_100HALF)
2528 ecmd->advertising |= ADVERTISED_100baseT_Half;
2529 if (adv & ADVERTISE_100FULL)
2530 ecmd->advertising |= ADVERTISED_100baseT_Full;
2531 if (np->autoneg && np->gigabit == PHY_GIGABIT) {
2532 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
2533 if (adv & ADVERTISE_1000FULL)
			ecmd->advertising |= ADVERTISED_1000baseT_Full;
	}
2537 ecmd->supported = (SUPPORTED_Autoneg |
2538 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_MII);
2541 if (np->gigabit == PHY_GIGABIT)
2542 ecmd->supported |= SUPPORTED_1000baseT_Full;
2544 ecmd->phy_address = np->phyaddr;
2545 ecmd->transceiver = XCVR_EXTERNAL;
	/* ignore maxtxpkt, maxrxpkt for now */
	spin_unlock_irq(&np->lock);
	return 0;
}
static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);

	if (ecmd->port != PORT_MII)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_EXTERNAL)
		return -EINVAL;
	if (ecmd->phy_address != np->phyaddr) {
		/* TODO: support switching between multiple phys. Should be
		 * trivial, but not enabled due to lack of test hardware. */
		return -EINVAL;
	}
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		u32 mask;

		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT)
			mask |= ADVERTISED_1000baseT_Full;

		if ((ecmd->advertising & mask) == 0)
			return -EINVAL;
	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		/* Note: autonegotiation disable, speed 1000 intentionally
		 * forbidden - no one should need that. */
		if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
			return -EINVAL;
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	spin_lock_irq(&np->lock);
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		int adv, bmcr;

		np->autoneg = 1;

		/* advertise only what has been requested */
2595 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
2596 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2597 if (ecmd->advertising & ADVERTISED_10baseT_Half)
2598 adv |= ADVERTISE_10HALF;
2599 if (ecmd->advertising & ADVERTISED_10baseT_Full)
2600 adv |= ADVERTISE_10FULL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
2601 if (ecmd->advertising & ADVERTISED_100baseT_Half)
2602 adv |= ADVERTISE_100HALF;
2603 if (ecmd->advertising & ADVERTISED_100baseT_Full)
2604 adv |= ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
2605 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
2607 if (np->gigabit == PHY_GIGABIT) {
2608 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
2609 adv &= ~ADVERTISE_1000FULL;
2610 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
2611 adv |= ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
2616 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
	} else {
		int adv, bmcr;

		np->autoneg = 0;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
2625 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2626 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
2627 adv |= ADVERTISE_10HALF;
2628 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
2629 adv |= ADVERTISE_10FULL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
2630 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
2631 adv |= ADVERTISE_100HALF;
2632 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
2633 adv |= ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
2634 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
2635 np->fixed_mode = adv;
2637 if (np->gigabit == PHY_GIGABIT) {
2638 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
2639 adv &= ~ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_FULLDPLX);
		if (adv & (ADVERTISE_10FULL|ADVERTISE_100FULL))
			bmcr |= BMCR_FULLDPLX;
		if (adv & (ADVERTISE_100HALF|ADVERTISE_100FULL))
			bmcr |= BMCR_SPEED100;
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
	}
	if (netif_running(dev)) {
		/* Wait a bit and then reconfigure the nic. */
		udelay(10);
		nv_linkchange(dev);
	}
	spin_unlock_irq(&np->lock);

	return 0;
}
#define FORCEDETH_REGS_VER	1

static int nv_get_regs_len(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return np->register_size;
}
static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 *rbuf = buf;
	int i;

	regs->version = FORCEDETH_REGS_VER;
	spin_lock_irq(&np->lock);
	for (i = 0; i < np->register_size/sizeof(u32); i++)
		rbuf[i] = readl(base + i*sizeof(u32));
	spin_unlock_irq(&np->lock);
}
static int nv_nway_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int bmcr, ret = 0;

	spin_lock_irq(&np->lock);
	if (np->autoneg) {
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irq(&np->lock);
	return ret;
}
static int nv_set_tso(struct net_device *dev, u32 value)
{
	struct fe_priv *np = netdev_priv(dev);
	if ((np->driver_data & DEV_HAS_CHECKSUM))
		return ethtool_op_set_tso(dev, value);
	else
		return value ? -EOPNOTSUPP : 0;
}
static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
	struct fe_priv *np = netdev_priv(dev);
2720 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
2721 ring->rx_mini_max_pending = 0;
2722 ring->rx_jumbo_max_pending = 0;
2723 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
2725 ring->rx_pending = np->rx_ring_size;
2726 ring->rx_mini_pending = 0;
2727 ring->rx_jumbo_pending = 0;
	ring->tx_pending = np->tx_ring_size;
}
static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff, *rx_dma, *tx_dma, *tx_dma_len;
	dma_addr_t ring_addr;
	if (ring->rx_pending < RX_RING_MIN ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_mini_pending != 0 ||
	    ring->rx_jumbo_pending != 0 ||
	    (np->desc_ver == DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
	    (np->desc_ver != DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
		return -EINVAL;
	}
2751 /* allocate new rings */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
				sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
				&ring_addr);
	} else {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
				sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
				&ring_addr);
	}
2761 rx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->rx_pending, GFP_KERNEL);
2762 rx_dma = kmalloc(sizeof(dma_addr_t) * ring->rx_pending, GFP_KERNEL);
2763 tx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->tx_pending, GFP_KERNEL);
2764 tx_dma = kmalloc(sizeof(dma_addr_t) * ring->tx_pending, GFP_KERNEL);
2765 tx_dma_len = kmalloc(sizeof(unsigned int) * ring->tx_pending, GFP_KERNEL);
2766 if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) {
2767 /* fall back to old rings */
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		} else {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		}
		kfree(rx_skbuff);
		kfree(rx_dma);
		kfree(tx_skbuff);
		kfree(tx_dma);
		kfree(tx_dma_len);
		return -ENOMEM;
	}
2790 if (netif_running(dev)) {
2791 nv_disable_irq(dev);
2792 spin_lock_bh(&dev->xmit_lock);
2793 spin_lock(&np->lock);
2805 /* set new values */
2806 np->rx_ring_size = ring->rx_pending;
2807 np->tx_ring_size = ring->tx_pending;
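	/* tx_limit_stop/tx_limit_start act as stop/wake watermarks for the
	 * transmit queue (semantics inferred from their use in
	 * nv_start_xmit and nv_tx_done). */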
2808 np->tx_limit_stop = ring->tx_pending - TX_LIMIT_DIFFERENCE;
2809 np->tx_limit_start = ring->tx_pending - TX_LIMIT_DIFFERENCE - 1;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
2817 np->rx_skbuff = (struct sk_buff**)rx_skbuff;
2818 np->rx_dma = (dma_addr_t*)rx_dma;
2819 np->tx_skbuff = (struct sk_buff**)tx_skbuff;
2820 np->tx_dma = (dma_addr_t*)tx_dma;
2821 np->tx_dma_len = (unsigned int*)tx_dma_len;
2822 np->ring_addr = ring_addr;
2824 memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
2825 memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
2826 memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
2827 memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
2828 memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
2830 if (netif_running(dev)) {
2831 /* reinit driver view of the queues */
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}
2838 /* reinit nic view of the queues */
2839 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
2840 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
2841 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
2842 base + NvRegRingSizes);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart engines */
		nv_start_rx(dev);
		nv_start_tx(dev);
		spin_unlock(&np->lock);
		spin_unlock_bh(&dev->xmit_lock);
		nv_enable_irq(dev);
	}
	return 0;
}
2859 static struct ethtool_ops ops = {
2860 .get_drvinfo = nv_get_drvinfo,
2861 .get_link = ethtool_op_get_link,
2862 .get_wol = nv_get_wol,
2863 .set_wol = nv_set_wol,
2864 .get_settings = nv_get_settings,
2865 .set_settings = nv_set_settings,
2866 .get_regs_len = nv_get_regs_len,
2867 .get_regs = nv_get_regs,
2868 .nway_reset = nv_nway_reset,
2869 .get_perm_addr = ethtool_op_get_perm_addr,
2870 .get_tso = ethtool_op_get_tso,
2871 .set_tso = nv_set_tso,
2872 .get_ringparam = nv_get_ringparam,
	.set_ringparam = nv_set_ringparam,
};
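/*
 * These hooks implement the standard ethtool interface: e.g. plain
 * "ethtool eth0" hits nv_get_settings, "ethtool -r eth0" hits
 * nv_nway_reset, and "ethtool -g/-G eth0" hit nv_get/set_ringparam.
 */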
static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct fe_priv *np = get_nvpriv(dev);

	spin_lock_irq(&np->lock);

	/* save vlan group */
	np->vlangrp = grp;

	if (grp) {
		/* enable vlan on MAC */
		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
	} else {
		/* disable vlan on MAC */
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
	}

	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);

	spin_unlock_irq(&np->lock);
}
static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	/* nothing to do */
}
static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 msixmap = 0;

	/* Each interrupt bit can be mapped to an MSI-X vector (4 bits).
	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
	 * the remaining 8 interrupts.
	 */
	for (i = 0; i < 8; i++) {
		if ((irqmask >> i) & 0x1) {
			msixmap |= vector << (i << 2);
		}
	}
	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);

	msixmap = 0;
	for (i = 0; i < 8; i++) {
		if ((irqmask >> (i + 8)) & 0x1) {
			msixmap |= vector << (i << 2);
		}
	}
	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
}
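/* Example of the nibble mapping: irqmask 0x000f with vector 1 sets the
 * four low map nibbles, i.e. 0x1111 is or-ed into NvRegMSIXMap0. */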
static int nv_request_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int i;

	if (np->msi_flags & NV_MSI_X_CAPABLE) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			np->msi_x_entry[i].entry = i;
		}
		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
2942 np->msi_flags |= NV_MSI_X_ENABLED;
2943 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
2944 /* Request irq for rx handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}
2951 /* Request irq for tx handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_rx;
				}
2958 /* Request irq for link and timer handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_tx;
				}
2965 /* map interrupts to their respective vector */
2966 writel(0, base + NvRegMSIXMap0);
2967 writel(0, base + NvRegMSIXMap1);
2968 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
2969 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
			} else {
				/* Request irq for all interrupts */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}
2980 /* map interrupts to vector 0 */
2981 writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
			}
		}
	}
2986 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
2987 if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
2988 np->msi_flags |= NV_MSI_ENABLED;
			if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
				pci_disable_msi(np->pci_dev);
				np->msi_flags &= ~NV_MSI_ENABLED;
				goto out_err;
			}
2996 /* map interrupts to vector 0 */
2997 writel(0, base + NvRegMSIMap0);
2998 writel(0, base + NvRegMSIMap1);
2999 /* enable msi vector 0 */
			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
		}
	}
	if (ret != 0) {
		if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
			goto out_err;
	}

	return 0;

out_free_tx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
out_free_rx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
out_err:
	return 1;
}
static void nv_free_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int i;

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			free_irq(np->msi_x_entry[i].vector, dev);
		}
		pci_disable_msix(np->pci_dev);
		np->msi_flags &= ~NV_MSI_X_ENABLED;
	} else {
		free_irq(np->pci_dev->irq, dev);
		if (np->msi_flags & NV_MSI_ENABLED) {
			pci_disable_msi(np->pci_dev);
			np->msi_flags &= ~NV_MSI_ENABLED;
		}
	}
}
static int nv_open(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int oom, i;

	dprintk(KERN_DEBUG "nv_open: begin\n");

	/* 1) erase previous misconfiguration */
	if (np->driver_data & DEV_HAS_POWER_CNTRL)
		nv_mac_reset(dev);
	/* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
3050 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
3051 writel(0, base + NvRegMulticastAddrB);
3052 writel(0, base + NvRegMulticastMaskA);
3053 writel(0, base + NvRegMulticastMaskB);
3054 writel(0, base + NvRegPacketFilterFlags);
3056 writel(0, base + NvRegTransmitterControl);
3057 writel(0, base + NvRegReceiverControl);
3059 writel(0, base + NvRegAdapterControl);
3061 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
3062 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
3064 /* 2) initialize descriptor rings */
	oom = nv_init_ring(dev);
	writel(0, base + NvRegLinkSpeed);
	writel(0, base + NvRegUnknownTransmitterReg);
	nv_txrx_reset(dev);
	writel(0, base + NvRegUnknownSetupReg6);
3073 np->in_shutdown = 0;
3075 /* 3) set mac address */
3076 nv_copy_mac_to_hw(dev);
3078 /* 4) give hw rings */
3079 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3080 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3081 base + NvRegRingSizes);
3083 /* 5) continue setup */
3084 writel(np->linkspeed, base + NvRegLinkSpeed);
3085 writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
3086 writel(np->txrxctl_bits, base + NvRegTxRxControl);
3087 writel(np->vlanctl_bits, base + NvRegVlanControl);
3089 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
3090 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
3091 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
3092 KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
3094 writel(0, base + NvRegUnknownSetupReg4);
3095 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3096 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
3098 /* 6) continue setup */
3099 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
3100 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
3101 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
3102 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3104 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
3105 get_random_bytes(&i, sizeof(i));
3106 writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
3107 writel(NVREG_UNKSETUP1_VAL, base + NvRegUnknownSetupReg1);
3108 writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2);
3109 if (poll_interval == -1) {
		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
		else
			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	} else
		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
3117 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
3118 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
3119 base + NvRegAdapterControl);
3120 writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
3121 writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4);
3122 writel(NVREG_WAKEUPFLAGS_VAL, base + NvRegWakeUpFlags);
3124 i = readl(base + NvRegPowerState);
3125 if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
3126 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
3130 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
3132 nv_disable_hw_interrupts(dev, np->irqmask);
3134 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
3135 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	if (nv_request_irq(dev)) {
		goto out_drain;
	}

	/* ask for interrupts */
	nv_enable_hw_interrupts(dev, np->irqmask);
3143 nv_enable_hw_interrupts(dev, np->irqmask);
3145 spin_lock_irq(&np->lock);
3146 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
3147 writel(0, base + NvRegMulticastAddrB);
3148 writel(0, base + NvRegMulticastMaskA);
3149 writel(0, base + NvRegMulticastMaskB);
3150 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
	/* One manual link speed update: Interrupts are enabled, future link
	 * speed changes cause interrupts and are handled by nv_link_irq().
	 */
	{
		u32 miistat;
		miistat = readl(base + NvRegMIIStatus);
		writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
		dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
	}
	/* set linkspeed to invalid value, thus force nv_update_linkspeed
	 * to init hw */
	np->linkspeed = 0;
	ret = nv_update_linkspeed(dev);
	nv_start_rx(dev);
	nv_start_tx(dev);
	netif_start_queue(dev);
	if (ret) {
		netif_carrier_on(dev);
	} else {
		printk("%s: no link during initialization.\n", dev->name);
		netif_carrier_off(dev);
	}
	if (oom)
		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
	spin_unlock_irq(&np->lock);

	return 0;

out_drain:
	drain_ring(dev);
	return ret;
}
static int nv_close(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base;
3188 spin_lock_irq(&np->lock);
3189 np->in_shutdown = 1;
3190 spin_unlock_irq(&np->lock);
3191 synchronize_irq(dev->irq);
3193 del_timer_sync(&np->oom_kick);
3194 del_timer_sync(&np->nic_poll);
	netif_stop_queue(dev);
	spin_lock_irq(&np->lock);
	nv_stop_tx(dev);
	nv_stop_rx(dev);
	nv_txrx_reset(dev);
3202 /* disable interrupts on the nic or we will lock up */
3203 base = get_hwbase(dev);
	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);
	drain_ring(dev);

	if (np->wolenabled)
		nv_start_rx(dev);
	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);

	/* FIXME: power down nic */

	return 0;
}
static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct fe_priv *np;
	unsigned long addr;
	u8 __iomem *base;
	int err, i;
	u32 powerstate;

	dev = alloc_etherdev(sizeof(struct fe_priv));
	err = -ENOMEM;
	if (!dev)
		goto out;
3243 np->pci_dev = pci_dev;
3244 spin_lock_init(&np->lock);
3245 SET_MODULE_OWNER(dev);
3246 SET_NETDEV_DEV(dev, &pci_dev->dev);
3248 init_timer(&np->oom_kick);
3249 np->oom_kick.data = (unsigned long) dev;
3250 np->oom_kick.function = &nv_do_rx_refill; /* timer handler */
3251 init_timer(&np->nic_poll);
3252 np->nic_poll.data = (unsigned long) dev;
3253 np->nic_poll.function = &nv_do_nic_poll; /* timer handler */
	err = pci_enable_device(pci_dev);
	if (err) {
		printk(KERN_INFO "forcedeth: pci_enable_dev failed (%d) for device %s\n",
			err, pci_name(pci_dev));
		goto out_free;
	}

	pci_set_master(pci_dev);

	err = pci_request_regions(pci_dev, DRV_NAME);
	if (err < 0)
		goto out_disable;
	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL))
		np->register_size = NV_PCI_REGSZ_VER2;
	else
		np->register_size = NV_PCI_REGSZ_VER1;
	err = -EINVAL;
	addr = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
				pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
				pci_resource_len(pci_dev, i),
				pci_resource_flags(pci_dev, i));
		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
				pci_resource_len(pci_dev, i) >= np->register_size) {
			addr = pci_resource_start(pci_dev, i);
			break;
		}
	}
	if (i == DEVICE_COUNT_RESOURCE) {
		printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n",
					pci_name(pci_dev));
		goto out_relreg;
	}

	/* copy of driver data */
3293 np->driver_data = id->driver_data;
3295 /* handle different descriptor versions */
3296 if (id->driver_data & DEV_HAS_HIGH_DMA) {
3297 /* packet format 3: supports 40-bit addressing */
3298 np->desc_ver = DESC_VER_3;
3299 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
		if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
			printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
					pci_name(pci_dev));
		} else {
			dev->features |= NETIF_F_HIGHDMA;
			printk(KERN_INFO "forcedeth: using HIGHDMA\n");
		}
		if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
			printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
					pci_name(pci_dev));
		}
	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
3312 /* packet format 2: supports jumbo frames */
3313 np->desc_ver = DESC_VER_2;
3314 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
	} else {
		/* original packet format */
		np->desc_ver = DESC_VER_1;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
	}
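	/* DESC_VER_1/2 share the compact two-word ring_desc layout;
	 * DESC_VER_3 uses the larger ring_desc_ex, which carries the upper
	 * bits of the 40-bit buffer address (and apparently the vlan tag
	 * on nics with DEV_HAS_VLAN). */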
3321 np->pkt_limit = NV_PKTLIMIT_1;
3322 if (id->driver_data & DEV_HAS_LARGEDESC)
3323 np->pkt_limit = NV_PKTLIMIT_2;
	if (id->driver_data & DEV_HAS_CHECKSUM) {
		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
#ifdef NETIF_F_TSO
		dev->features |= NETIF_F_TSO;
#endif
	}
3333 np->vlanctl_bits = 0;
3334 if (id->driver_data & DEV_HAS_VLAN) {
3335 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
3336 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
3337 dev->vlan_rx_register = nv_vlan_rx_register;
		dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid;
	}
	np->msi_flags = 0;
	if ((id->driver_data & DEV_HAS_MSI) && !disable_msi) {
		np->msi_flags |= NV_MSI_CAPABLE;
	}
	if ((id->driver_data & DEV_HAS_MSI_X) && !disable_msix) {
		np->msi_flags |= NV_MSI_X_CAPABLE;
	}
3349 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE;
	if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE;
	}
	err = -ENOMEM;
	np->base = ioremap(addr, np->register_size);
	if (!np->base)
		goto out_relreg;
	dev->base_addr = (unsigned long)np->base;

	dev->irq = pci_dev->irq;
3363 np->rx_ring_size = RX_RING_DEFAULT;
3364 np->tx_ring_size = TX_RING_DEFAULT;
3365 np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE;
3366 np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1;
3368 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
3369 np->rx_ring.orig = pci_alloc_consistent(pci_dev,
3370 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
		if (!np->rx_ring.orig)
			goto out_unmap;
3374 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
3376 np->rx_ring.ex = pci_alloc_consistent(pci_dev,
3377 sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
		if (!np->rx_ring.ex)
			goto out_unmap;
3381 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
3383 np->rx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->rx_ring_size, GFP_KERNEL);
3384 np->rx_dma = kmalloc(sizeof(dma_addr_t) * np->rx_ring_size, GFP_KERNEL);
3385 np->tx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->tx_ring_size, GFP_KERNEL);
3386 np->tx_dma = kmalloc(sizeof(dma_addr_t) * np->tx_ring_size, GFP_KERNEL);
3387 np->tx_dma_len = kmalloc(sizeof(unsigned int) * np->tx_ring_size, GFP_KERNEL);
	if (!np->rx_skbuff || !np->rx_dma || !np->tx_skbuff || !np->tx_dma || !np->tx_dma_len)
		goto out_freering;
3390 memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
3391 memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
3392 memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
3393 memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
3394 memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
3396 dev->open = nv_open;
3397 dev->stop = nv_close;
3398 dev->hard_start_xmit = nv_start_xmit;
3399 dev->get_stats = nv_get_stats;
3400 dev->change_mtu = nv_change_mtu;
3401 dev->set_mac_address = nv_set_mac_address;
3402 dev->set_multicast_list = nv_set_multicast;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = nv_poll_controller;
#endif
3406 SET_ETHTOOL_OPS(dev, &ops);
3407 dev->tx_timeout = nv_tx_timeout;
3408 dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
3410 pci_set_drvdata(pci_dev, dev);
3412 /* read the mac address */
3413 base = get_hwbase(dev);
3414 np->orig_mac[0] = readl(base + NvRegMacAddrA);
3415 np->orig_mac[1] = readl(base + NvRegMacAddrB);
3417 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
3418 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
3419 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
3420 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
3421 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
3422 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
3423 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
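	/* The nic keeps the MAC address byte-swapped across the two address
	 * registers; nv_close() writes the original (misordered) words back
	 * so that a subsequent probe reads the same address again. */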
	if (!is_valid_ether_addr(dev->perm_addr)) {
		/*
		 * Bad mac address. At least one bios sets the mac address
		 * to 01:23:45:67:89:ab
		 */
		printk(KERN_ERR "%s: Invalid Mac address detected: %02x:%02x:%02x:%02x:%02x:%02x\n",
			pci_name(pci_dev),
			dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
			dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
		printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n");
		dev->dev_addr[0] = 0x00;
		dev->dev_addr[1] = 0x00;
		dev->dev_addr[2] = 0x6c;
		get_random_bytes(&dev->dev_addr[3], 3);
	}
3441 dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev),
3442 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
3443 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
	/* disable WOL */
	writel(0, base + NvRegWakeUpFlags);
	np->wolenabled = 0;
	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
		u8 revision_id;
		pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id);
3453 /* take phy and nic out of low power mode */
3454 powerstate = readl(base + NvRegPowerState2);
3455 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
3456 if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
3457 id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
3458 revision_id >= 0xA3)
3459 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
		writel(powerstate, base + NvRegPowerState2);
	}
	if (np->desc_ver == DESC_VER_1) {
		np->tx_flags = NV_TX_VALID;
	} else {
		np->tx_flags = NV_TX2_VALID;
	}
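	/* The low bits of msi_flags encode the number of MSI-X vectors to
	 * request: three (rx, tx, other) in throughput mode, one shared
	 * vector in cpu/timer mode. */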
	if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0003;
	} else {
		np->irqmask = NVREG_IRQMASK_CPU;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0001;
	}
3478 if (id->driver_data & DEV_NEED_TIMERIRQ)
3479 np->irqmask |= NVREG_IRQ_TIMER;
	if (id->driver_data & DEV_NEED_LINKTIMER) {
		dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
		np->need_linktimer = 1;
		np->link_timeout = jiffies + LINK_TIMEOUT;
	} else {
		dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
		np->need_linktimer = 0;
	}
	/* find a suitable phy */
	for (i = 1; i <= 32; i++) {
		int id1, id2;
		int phyaddr = i & 0x1F;

		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
			pci_name(pci_dev), id1, id2, phyaddr);
		np->phyaddr = phyaddr;
		np->phy_oui = id1 | id2;
		break;
	}
	if (i == 33) {
		printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
		       pci_name(pci_dev));
		goto out_freering;
	}

	/* reset it */
	phy_init(dev);
	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;
	err = register_netdev(dev);
	if (err) {
		printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
		goto out_freering;
	}
	printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
		dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
		pci_name(pci_dev));

	return 0;
out_freering:
	pci_set_drvdata(pci_dev, NULL);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}
static void __devexit nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);

	unregister_netdev(dev);

	/* free all structures */
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}
3569 static struct pci_device_id pci_tbl[] = {
3570 { /* nForce Ethernet Controller */
3571 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
3572 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
3574 { /* nForce2 Ethernet Controller */
3575 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
3576 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
3578 { /* nForce3 Ethernet Controller */
3579 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
3580 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
3582 { /* nForce3 Ethernet Controller */
3583 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
3584 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
3586 { /* nForce3 Ethernet Controller */
3587 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
3588 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
3590 { /* nForce3 Ethernet Controller */
3591 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
3592 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
3594 { /* nForce3 Ethernet Controller */
3595 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
3596 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
3598 { /* CK804 Ethernet Controller */
3599 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
3600 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
3602 { /* CK804 Ethernet Controller */
3603 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
3604 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
3606 { /* MCP04 Ethernet Controller */
3607 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
3608 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
3610 { /* MCP04 Ethernet Controller */
3611 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
3612 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
3614 { /* MCP51 Ethernet Controller */
3615 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
3616 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
3618 { /* MCP51 Ethernet Controller */
3619 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
3620 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
3622 { /* MCP55 Ethernet Controller */
3623 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
3624 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX,
3626 { /* MCP55 Ethernet Controller */
3627 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX,
	},
	{0,},
};
static struct pci_driver driver = {
	.name = "forcedeth",
	.id_table = pci_tbl,
	.probe = nv_probe,
	.remove = __devexit_p(nv_remove),
};
static int __init init_nic(void)
{
	printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
	return pci_module_init(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}
3652 module_param(max_interrupt_work, int, 0);
3653 MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
3654 module_param(optimization_mode, int, 0);
3655 MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
3656 module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, in units of [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
3658 module_param(disable_msi, int, 0);
3659 MODULE_PARM_DESC(disable_msi, "Disable MSI interrupts by setting to 1.");
3660 module_param(disable_msix, int, 0);
3661 MODULE_PARM_DESC(disable_msix, "Disable MSIX interrupts by setting to 1.");
3663 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
3664 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
3665 MODULE_LICENSE("GPL");
3667 MODULE_DEVICE_TABLE(pci, pci_tbl);
3669 module_init(init_nic);
3670 module_exit(exit_nic);